query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | metadata (dict) | negatives (list, length 30) | negative_scores (list, length 30) | document_score (string, lengths 4–10) | document_rank (string, 2 classes)
---|---|---|---|---|---|---
Sets the peer of this Dial.
|
def peer(self, peer):
if peer is None:
raise ValueError("Invalid value for `peer`, must not be `None`") # noqa: E501
self._peer = peer
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def setPeer (self, peer):\n\t\tself.peer = peer",
"def setPeerToPeerNetwork(self, peerToPeerNetwork):\r\n raise NotImplementedError()",
"def peer(self, value: Optional[MicrobitPeer]) -> None:\n if self.__peer is not None:\n self.__peer.remove_listener(self.__execute)\n\n if value is not None:\n value.add_listener(self.__execute)\n\n self.__peer = value\n self.__sync_x()\n self.__sync_y()\n self.__sync_z()\n self.__sync_current_gesture()",
"def connect_to_peer(self):\n pass",
"def set_peer_working(self, peer_id):\n self.peers[peer_id].set_working_state()",
"def peer(self) -> str:\n return \"{0.host}:{0.port}\".format(self.transport.getPeer())",
"def add_new_peer(self, peer_id, peer_host, port):\n if peer_id not in self.chain_instance.peer_connect_dict:\n self.chain_instance.peer_connect_dict[peer_id] = {'host': peer_host, 'port': port}",
"def add_peer(self, peer_id, peer_ip):\n self.peers.update({peer_id: peer.Peer(peer_ip)})",
"def set_port(self, party_port) -> None:\n\n self._port = party_port",
"def peer_addresses(self, peer_addresses):\n\n self._peer_addresses = peer_addresses",
"def peer(self) -> Optional[MicrobitPeer]:\n return self.__peer",
"def peer_device(self) -> \"ASADevice\":\n if self._peer_device is None:\n self._peer_device = self.__class__(\n str(self.peer_ip_address), self.username, self.password, self.secret, self.port, **self.kwargs\n )\n else:\n self._peer_device.open()\n\n log.debug(\"Host %s: Peer device %s.\", self.host, self._peer_device)\n return self._peer_device",
"def connect_to_peer(self, uri):\n self.init_socket(uri)",
"def set_peer_waiting(self, peer_id, port):\n self.peers[peer_id].set_waiting_state(port)",
"def _set_next_hop_learned_from_peer(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name=\"next-hop-learned-from-peer\", rest_name=\"next-hop-learned-from-peer\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"next_hop_learned_from_peer must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=unicode, is_leaf=True, yang_name=\"next-hop-learned-from-peer\", rest_name=\"next-hop-learned-from-peer\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)\"\"\",\n })\n\n self.__next_hop_learned_from_peer = t\n if hasattr(self, '_set'):\n self._set()",
"def setConnectPortal(self,otherPortal):\n self._connectPortal = otherPortal",
"def getPeer(self):\n return address.IPv4Address('TCP', *((self.host, self.port) + ('INET',)))",
"def connect(self, peer):\n peer.listen()\n time.sleep(0.1)\n client_thread = ClientThread(peer.address, self.message_queue, self.queue_lock, self.on_message_received)\n client_thread.start()\n self.connected_as_client = True # TODO only if successful",
"def peer_device(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"peer_device\"), kwargs)",
"def connect_peer(self, peer, first=True):\n\n # Filter peers that we have send a connect request in previous loops\n # but we haven't seen a reply, yet.\n if peer.next_heart_beat_time is not None and first:\n logger.log(TRACE, \"%s is waiting for a reply until %r\",\n peer, peer.next_heart_beat_time)\n return\n\n # Compose the message.\n message = Message(\n source=self.app.uuid,\n to=peer.uuid,\n previous_hop=None,\n next_hop=peer.uuid,\n command=self.command_id,\n reply=False,\n handler=self,\n host=self.app.receiver.bind_address,\n port=self.app.receiver.bind_port,\n )\n\n if first:\n # Compute the timeout.\n peer.next_heart_beat_time = self.app.tick + UNRESPONSIVE_THRESHOLD\n peer.slow_heart_beat_down = 0\n logger.log(TRACE, \"First message composed for connect \"\n \"attempt to %s: %r; will wait until %r\",\n peer, message, peer.next_heart_beat_time)\n else:\n # Take into consideration the history of the peer.\n peer.schedule_heart_beat(self.app)\n logger.log(TRACE, \"Message composed for subsequent connect \"\n \"attempt to %s: %r; will wait until %r\",\n peer, message, peer.next_heart_beat_time)\n\n # We directly enqueue the message.\n self.app.sender.connection_queue.enqueue({peer: message})",
"def set_follower(self, follower):\n self.follower = follower",
"async def store_peers(self, peer: Peer):\n await self.peers.store(peer)",
"def set_follow(self, follow):\n self.follow = follow",
"def add_peer(self, peer, ws_extra_headers=None, ws_heartbeat=None):\n logger.info(\"Connecting to peer {}\".format(peer))\n return self.connection_manager.get_peer(\n peer,\n reconnect=not self.receptor.config._is_ephemeral,\n ws_extra_headers=ws_extra_headers,\n ws_heartbeat=ws_heartbeat,\n )",
"def peer_address(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"peer_address\")",
"def connect(self, telegram_bot, message_sender):\n\n self.__telegram_bot = telegram_bot\n self.__message_sender = message_sender",
"def connecting_peer(self, peer):\n if peer.next_heart_beat_time < self.app.tick:\n self.declare_no_connection(peer)\n else:\n logger.log(TRACE, \"%s is connecting (has until %r, now is %r)\",\n peer, peer.next_heart_beat_time, self.app.tick)",
"def add_peer(self, writer):\r\n address = self.get_address_string(writer)\r\n self.connection_pool[address] = writer\r\n logger.info(\"Added new peer to pool\", address=address)",
"def connection_made(self, transport):\n self.transport = transport\n peername = transport.get_extra_info('peername')\n self.ip = peername[0]\n self.client = \"{:s}:{:d}\".format(*peername)\n logger.debug('Connection from {}'.format(peername))\n clients.append(self)\n self.env = envs[self.ip]",
"def getPeer(self):\n return \"Peer:PID:\" + str(self.transport.pid)"
] |
[
"0.83918417",
"0.67424357",
"0.6415004",
"0.60654163",
"0.5945206",
"0.5834176",
"0.5686866",
"0.5547047",
"0.54729116",
"0.54373324",
"0.54167396",
"0.53934383",
"0.5380653",
"0.5360441",
"0.5354226",
"0.53020024",
"0.5296971",
"0.5267418",
"0.52219296",
"0.52138495",
"0.5179136",
"0.5177158",
"0.5096166",
"0.50865424",
"0.5085722",
"0.5077557",
"0.50725174",
"0.5060611",
"0.5054306",
"0.50514317"
] |
0.7684957
|
1
|
Provide video codec vcodec = "h264" acodec = "copy" extra = "" split_cmd = "ffmpeg -i '%s' -vcodec %s -acodec %s -y %s" % (file_path, vcodec, acodec, extra) s_cmd = " -i '%s' -vcodec %s -acodec %s"%(file_path, vcodec, acodec)
|
def split_video_random(file_path, start_pos, split_length, out_path):
s_cmd = " -i '%s'"%(file_path) #use default CODEC
try:
fileext = file_path.split(".")[-1]
except IndexError as e:
raise IndexError("No ext. in filename. Error: " + str(e))
split_start = start_pos
split_length = split_length
head, tail = os.path.split(file_path)
name, ext = tail.split('.')
filebase=name+'_'+str(start_pos)+'-'+str(split_length)
dstfilebase = out_path + '/' + filebase # create output file base
#split_str = ""
#split_str += " -ss " + str(split_start) + " -t " + str(split_length) + " '"+ dstfilebase + "." + fileext + "'"
s_str = ""
#s_str += "ffmpeg"+" -ss "+str(split_start)+" -t "+str(split_length) + s_cmd + " '"+dstfilebase + "." + fileext + "'"
s_str += "ffmpeg" + " -ss " + str(split_start) + s_cmd + " -t " + str(split_length) + " '"+ dstfilebase + "." + fileext + "'"
print("########################################################")
#print "About to run: "+split_cmd+split_str
print("About to run: "+s_str)
print("########################################################")
#output = subprocess.Popen(split_cmd+split_str, shell = True, stdout = subprocess.PIPE).stdout.read()
output = subprocess.Popen(s_str, shell=True, stdout=subprocess.PIPE).stdout.read()
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def split_by_manifest(filename, manifest, vcodec=\"copy\", acodec=\"copy\",\n extra=\"\", **kwargs):\n if not os.path.exists(manifest):\n raise SystemExit\n\n with open(manifest) as manifest_file:\n manifest_type = manifest.split(\".\")[-1]\n if manifest_type == \"json\":\n config = json.load(manifest_file)\n elif manifest_type == \"csv\":\n config = csv.DictReader(manifest_file)\n else:\n raise SystemExit\n\n split_cmd = \"ffmpeg -i '%s' -vcodec %s -acodec %s -y %s\" % (filename,\n vcodec,\n acodec,\n extra)\n split_count = 1\n split_error = []\n try:\n fileext = filename.split(\".\")[-1]\n except IndexError as e:\n raise IndexError(\"No . in filename. Error: \" + str(e))\n for video_config in config:\n split_str = \"\"\n try:\n split_start = video_config[\"start_time\"]\n split_length = video_config.get(\"end_time\", None)\n if not split_length:\n split_length = video_config[\"length\"]\n filebase = video_config[\"rename_to\"]\n if fileext in filebase:\n filebase = \".\".join(filebase.split(\".\")[:-1])\n\n split_str += \" -ss \" + str(split_start) + \" -t \" + \\\n str(split_length) + \\\n \" '\"+ filebase + \".\" + fileext + \\\n \"'\"\n output = subprocess.Popen(split_cmd+split_str,\n shell = True, stdout =\n subprocess.PIPE).stdout.read()\n except KeyError as e:\n raise SystemExit",
"def check_video_format(movie_file, desired_format='.mp4', original_format='.avi'):\n\n if not os.path.isfile(movie_file+original_format):\n print 'Error. avi file does not exist:'+movie_file+'.avi'\n if not os.path.isfile(movie_file+desired_format):\n cmd = ['ffmpeg']\n cmd += ['-i', movie_file+original_format]\n cmd += [movie_file+desired_format]\n cmd_string = ''.join([\"%s \" % el for el in cmd])\n #print '-->Running: ', cmd_string\n p = subprocess.Popen(cmd, shell=False)\n p.wait()",
"def _transcode_ffmpeg_args(mpeg_filename, mp4_filename, res):\n\n \"\"\"\n 697 ffmpeg -i Chef\\ Wanted\\ With\\ Anne\\ Burrell\\:\\ \\\"The\\ Re-Launch\\\".mpg\n -strict experimental -acodec aac -ac 2 -ab 160k -s 960x540 -vcodec libx264\n -vpre iPod640 -b 1200k -f mp4 -threads 0 chef.conversionmatrixsettings.mp4\n \"\"\"\n return [FFMPEG, \"-i\", mpeg_filename, \"-strict\", \"experimental\",\n \"-acodec\", \"aac\", \"-ac\", \"2\", \"-ab\", \"160k\", \"-s\", res,\n \"-vcodec\", \"libx264\", \"-vpre\", \"iPod640\", \"-b\", \"1200k\",\n \"-f\", \"mp4\", \"-threads\", \"0\", mp4_filename]",
"def extract_vob(in_vob, guid):\n\t#Detect interlacing.\n\tmediainfo_command = \"mediainfo --Inform='Video;%ScanType%,%ScanOrder%' \" + in_vob\n\tprint(mediainfo_command)\n\tprocess = subprocess.Popen(mediainfo_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t(cout, cerr) = process.communicate()\n\texit_code = process.wait()\n\tif exit_code != 0:\n\t\traise Exception(\"Calling Mediainfo on {in_vob} failed with exit code {exit_code}.\".format(in_vob=in_vob, exit_code=exit_code))\n\tmediainfo_parts = cout.decode(\"utf-8\").split(\",\")\n\tis_interlaced = mediainfo_parts[0] == \"Interlaced\"\n\tfield_order = mediainfo_parts[1].lower().strip()\n\tprint(\"Interlace detection:\", is_interlaced, field_order, \"(\", mediainfo_parts, \")\")\n\n\tffmpeg_command = [\"ffmpeg\", \"-i\", in_vob]\n\tprint(ffmpeg_command)\n\tprocess = subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t(cout, cerr) = process.communicate()\n\tprocess.wait() #Ignore the exit code. It always fails.\n\tvobinfo = cerr.decode(\"utf-8\")\n\ttracks = []\n\tfor match in re.finditer(r\" Stream #0:(\\d+)\\[0x[0-9a-f]+\\]: (\\w+): ([^\\n]+)\", vobinfo):\n\t\ttrack_nr = match.group(1)\n\t\ttrack_type = match.group(2)\n\t\ttrack_codec = match.group(3)\n\t\tnew_track = track.Track()\n\t\tnew_track.from_vob(track_nr, track_type, track_codec, is_interlaced, field_order)\n\t\tnew_track.file_name = guid + \"-T\" + str(new_track.track_nr) + \".\" + new_track.codec\n\t\tif new_track.type != \"unknown\":\n\t\t\ttracks.append(new_track)\n\n\t#Generate the parameters to pass to ffmpeg.\n\ttrack_params = [\"-i\", in_vob]\n\tfor track_metadata in tracks:\n\t\ttrack_params.append(\"-map\")\n\t\ttrack_params.append(\"0:\" + str(track_metadata.track_nr))\n\t\ttrack_params.append(\"-c\")\n\t\ttrack_params.append(\"copy\")\n\t\ttrack_params.append(track_metadata.file_name)\n\n\t#Extract all tracks.\n\tprint(\"---- Extracting tracks...\")\n\tffmpeg(*track_params)\n\n\treturn tracks",
"def write_video_ffmpeg(\n itr: Iterator[np.ndarray],\n out_file: str | Path,\n fps: int = 30,\n out_fps: int = 30,\n vcodec: str = \"libx264\",\n input_fmt: str = \"rgb24\",\n output_fmt: str = \"yuv420p\",\n quite=False\n) -> None:\n\n first_img = next(itr)\n height, width, _ = first_img.shape\n\n stream = ffmpeg.input(\"pipe:\", format=\"rawvideo\", pix_fmt=input_fmt, s=f\"{width}x{height}\", r=fps)\n stream = ffmpeg.output(stream, str(out_file), pix_fmt=output_fmt, vcodec=vcodec, r=out_fps)\n if quite:\n stream = stream.global_args('-loglevel', 'quiet')\n stream = ffmpeg.overwrite_output(stream)\n stream = ffmpeg.run_async(stream, pipe_stdin=True)\n\n def write_frame(img: np.ndarray) -> None:\n stream.stdin.write(as_uint8(img).tobytes())\n\n # Writes all the video frames to the file.\n write_frame(first_img)\n for img in itr:\n write_frame(img)\n\n stream.stdin.close()\n stream.wait()\n print('Done.')",
"def reencode(filepath, loglevel='panic'):\n try:\n import ffmpeg\n except ImportError:\n logger.error(\n 'Import Error! Cant import ffmpeg. '\n 'Annotations operations will be limited. import manually and fix errors')\n raise\n if not os.path.isfile(filepath):\n raise IOError('File doesnt exists: {}'.format(filepath))\n # re encode video without b frame and as mp4\n basename, ext = os.path.splitext(filepath)\n output_filepath = os.path.join(basename, os.path.basename(filepath).replace(ext, '.mp4'))\n if not os.path.isdir(os.path.dirname(output_filepath)):\n os.makedirs(os.path.dirname(output_filepath))\n try:\n stream = ffmpeg.input(filepath, **{'loglevel': loglevel}).output(output_filepath,\n **{'x264opts': 'bframes=0',\n 'f': 'mp4'})\n ffmpeg.overwrite_output(stream).run()\n except Exception as e:\n logger.exception('ffmpeg error in disassemble:')\n raise\n\n output_probe = Videos.get_info(output_filepath)\n start_time = eval(output_probe['streams'][0]['start_time'])\n fps = eval(output_probe['streams'][0]['avg_frame_rate'])\n has_b_frames = output_probe['streams'][0]['has_b_frames']\n start_frame = fps * start_time\n if start_time != 0:\n logger.warning('Video start_time is not 0!')\n if has_b_frames != 0:\n logger.warning('Video still has b frames!')\n return output_filepath",
"def loadVideo( iFileName, iFrameSize = (576, 720) ):\n import sys\n import subprocess as sp\n # ustvari klic ffmpeg in preusmeri izhod v cevovod\n command = [ 'ffmpeg',\n '-i', iFileName,\n '-f', 'image2pipe',\n '-pix_fmt', 'rgb24',\n '-vcodec', 'rawvideo', '-']\n pipe = sp.Popen(command, stdout = sp.PIPE, bufsize=10**8)\n # definiraj novo spremeljivko\n oVideo = np.array([])\n iFrameSize = np.asarray( iFrameSize )\n frameCount = 0\n # zacni neskoncno zanko\n while True:\n frameCount += 1\n# print( 'Berem okvir %d ...' % frameCount )\n print(\"\\rBerem okvir %d ...\" % frameCount, end=\"\")\n # preberi Y*X*3 bajtov (= 1 okvir)\n raw_frame = pipe.stdout.read(np.prod(iFrameSize)*3)\n # pretvori prebrane podatke v numpy polje\n frame = np.fromstring(raw_frame, dtype='uint8') \n # preveri ce je velikost ustrezna, sicer prekini zanko\n if frame.size != (np.prod(iFrameSize)*3):\n print(\" koncano!\\n\")\n break;\n # preoblikuj dimenzije in pretvori v sivinsko sliko\n frame = colorToGray( frame.reshape((iFrameSize[0],iFrameSize[1],3)) )\n # sprazni medpomnilnik \n pipe.stdout.flush() \n # vnesi okvir v izhodno sprememnljivko\n if oVideo.size == 0:\n oVideo = frame\n oVideo = oVideo[...,None]\n else:\n oVideo = np.concatenate((oVideo,frame[...,None]), axis=2)\n # zapri cevovod\n pipe.terminate()\n # vrni izhodno spremenljivko\n return oVideo",
"def __init__(self,vid_path:str,num_frames:int=None,vid_flow_direction:str='left'):\n \n self.num_frames=num_frames\n if vid_path.split('.')[-1]=='cine' or vid_flow_direction!='left':\n #This is a cine file or needs to be rotated, convert to mp4\n print('Converting .cine file to mp4 (lossless)')\n #detect platform so we can correct file paths for ffmpeg\n is_win=re.compile('.*[Ww]in.*')\n if is_win.match(sys.platform):\n corrected_vid_path='\"'+vid_path+'\"'\n else:\n #Put escape characters in front of spaces in file name\n corrected_vid_path=[]\n for c in vid_path:\n if c==' ':\n corrected_vid_path.append('\\\\')\n corrected_vid_path.append(c)\n corrected_vid_path=''.join(corrected_vid_path)\n if vid_flow_direction=='up':\n rotate='-vf \"transpose=2\" '\n elif vid_flow_direction=='left':\n rotate=''\n elif vid_flow_direction=='right':\n rotate='-vf \"transpose=2,transpose=2\" '\n else:\n raise Exception(\"vid_flow_direction must be 'up', 'left' or 'right'\")\n if num_frames!=None:\n frames='-frames:v {0} '.format(num_frames)\n else:\n frames=''\n os_handle,new_file_path=tempfile.mkstemp(suffix='.mp4')\n #close file, we don't work with it directly\n os.close(os_handle)\n ffmpeg_command='ffmpeg -y -i {orig_file} {frames}{rotate}-f mp4 -crf 0 {new_file}'.format(orig_file=corrected_vid_path,rotate=rotate,new_file=new_file_path,frames=frames)\n print(ffmpeg_command)\n list(os.popen(ffmpeg_command))\n self.vid_path=new_file_path\n self.delete_file=True\n stats=os.stat(new_file_path)\n if stats.st_size==0:\n raise Exception('File conversion failed, check that ffmpeg is on PATH')\n else:\n #Not a cine\n self.vid_path=vid_path\n self.delete_file=False",
"def avi2mpg(filename):\n assert filename.endswith('.avi')\n ofile = '%s.mpg' % os.path.splitext(filename)[0]\n run_shell_cmd('ffmpeg -y -i %s -qscale:v 1 %s' % (filename, ofile), ignore=True)\n return ofile",
"def mpg2avi(filename):\n assert filename.endswith('.mpg')\n ofile = '%s.avi' % os.path.splitext(filename)[0]\n run_shell_cmd('ffmpeg -y -i %s -qscale:v 2 %s' % (filename, ofile), ignore=True)\n return ofile",
"def makeVideo():\n os.system(\"cd video && ffmpeg -r 10 -i img%05d.jpg -vcodec mpeg4 -y caronthehill_clip.mp4\")",
"def splice_clips(list_file, output_file):\n cmd = ['ffmpeg']\n cmd += ['-f', 'concat']\n cmd += ['-i', list_file]\n cmd += ['-c', 'copy']\n cmd += ['-y']\n cmd += [output_file + '_clips.mp4']\n #cmd += ['> /dev/null 2>&1 < /dev/null'] \n\n cmd_string = ''.join([\"%s \" % el for el in cmd])\n print \"Splicing clips: \", cmd_string\n #print '-->Running: ', cmd_string\n p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE)#, stderr=subprocess.PIPE)\n p.wait()",
"def transcodetomp4(file_in, logger):\n\n import subprocess\n\n file_out = file_in.replace('.mkv', '.mp4')\n\n if os.path.isfile('/usr/bin/avconv'):\n\n convert_command = 'su securityspy -c \\\"/usr/bin/avconv -i \"{}\" -f mp4 -vcodec copy -acodec '.format(file_in) + \\\n 'libfaac -b:a 112k -ac 2 -y \"{}\"'.format(file_out) + \"\\\"\"\n\n try:\n subprocess.check_call(convert_command, shell=True)\n except subprocess.CalledProcessError:\n logger.error(\"The command to transcode: {} --- failed...\".format(convert_command))\n return file_in\n\n return file_out\n else:\n return file_in\n # fin",
"def make_video(data,\n xdim, ydim, sample_read_rows, sample_read_cols, image_write_rows, image_write_cols,\n directory, filename, fps = 24.0, start_frame = 1, end_frame = None, timestamp = False, fontsize = 30, ts_pos = (0,0), save_raw = False):\n\n #Command to send via the command prompt which specifies the pipe parameters\n # command = ['ffmpeg',\n # '-y', # (optional) overwrite output file if it exists\n # '-f', 'image2pipe',\n # '-vcodec', 'mjpeg', #'mjpeg',\n # '-r', '1',\n # '-r', str(fps), # frames per second\n # '-i', '-', # The input comes from a pipe\n # '-an', # Tells FFMPEG not to expect any audio\n # '-vcodec', 'mpeg4',\n # '-b:v', '5000k',\n # directory + filename + \"/\"+filename+\".mp4\",\n # '-hide_banner',\n # '-loglevel', 'panic']\n\n # Create directories if they don't exist\n if not os.path.exists(os.path.join(directory, filename, 'frames/')):\n os.makedirs(os.path.join(directory, filename, 'frames/'))\n if save_raw and not os.path.exists(os.path.join(directory, filename, 'frames-raw/')):\n os.makedirs(os.path.join(directory, filename, 'frames-raw/'))\n\n if end_frame == None:\n end_frame = data.FrameCount\n\n cm = colormap.get_cmap('viridis')\n\n for i, frame_offset in enumerate(tqdm.tqdm(range(start_frame, end_frame))):\n frame = FrameRead(data, frame_offset)\n frame_image = np.zeros([ydim, xdim], dtype=np.uint8)\n frame_image[image_write_rows, image_write_cols] = frame.frame_data[sample_read_rows, sample_read_cols]\n\n rgb_im = Image.fromarray(cm(frame_image, bytes=True)).convert('RGB')\n rgb_im.save(os.path.join(directory, filename, 'frames/', f'{i}.jpg'), 'JPEG')\n\n if save_raw:\n Image.fromarray(np.uint8(frame.frame_data), mode='L').save(os.path.join(directory, filename, 'frames-raw/', f'{i}.jpg'), 'JPEG')",
"def prepare_video(path_to_video: str, number_of_images=87) -> None:\n\n temp_video = path.join(path_to_video, 'temp_outpy.mp4')\n video = path.join(path_to_video, 'outpy.h264')\n\n # create mp4 video for metadata and compute video duration\n subprocess.run(['ffmpeg', '-i', video, '-c', 'copy', temp_video])\n result = subprocess.run([\"ffprobe\", \"-v\", \"error\", \"-show_entries\",\n \"format=duration\", \"-of\",\n \"default=noprint_wrappers=1:nokey=1\", temp_video],\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n video_duration = float(result.stdout)\n\n # create images folder\n path_to_images = path.join(path_to_video, 'images')\n if path.exists(path_to_images) and path.isdir(path_to_images):\n shutil.rmtree(path_to_images)\n makedirs(path_to_images)\n\n # split the given video into images\n subprocess.run(['ffmpeg', '-i', temp_video, '-r', str(number_of_images / video_duration), '-f', 'image2',\n path.join(path_to_images, 'image%d.jpg')])\n\n # remove extra files\n remove_extra_images(path_to_images, number_of_images)\n remove(temp_video)",
"def mergeVideos(filename):\n #all possible names\n filenames=[filename+\"_vp8.webm\",filename+\"_vp9.webm\",filename+\"_h265.mp4\",filename+\"_av1.mkv\"]\n #output name\n output=filename+\"_merged.mp4\"\n #command\n cmd = [\n \"ffmpeg\",\n \"-i\",\n filenames[0],\n \"-i\",\n filenames[1],\n \"-i\",\n filenames[2],\n \"-i\",\n filenames[3],\n \"-filter_complex\",\n '\"[0:v][1:v]hstack[top]; \\#stack videos\n [2:v][3:v]hstack[bottom]; \\\n [top][bottom]vstack,format=yuv420p[v]; \\\n [0:a][1:a][2:a][3:a]amerge=inputs=4[a]\"',\n \"-map\",\n '\"[v]\"',\n \"-map\",\n '\"[a]\"',\n \"-ac\",\n \"2\",\n output\n ]\n # to convert a list to string we use join\n separator = \" \"\n com = separator.join(cmd)\n # use the command\n os.system(com)",
"def check_video_timestamps(movie_file, desired_format='.mp4', desired_framerate=30):\n\n check_video_format(movie_file, desired_format='.mp4', original_format='.avi')\n\n new_movie_file = movie_file+'_tt'+desired_format\n if not os.path.isfile(new_movie_file):\n #Convert file to 30 fps\n cmd = ['ffmpeg', '-i', movie_file+desired_format]\n cmd += ['-r', str(desired_framerate)]\n cmd += ['-y', movie_file+'_t'+desired_format]\n cmd_string = ''.join([\"%s \" % el for el in cmd]) \n #print '-->Running: ', cmd_string\n p = subprocess.Popen(cmd, shell=False)\n p.wait()\n\n #Add timecode text to video\n cmd = 'ffmpeg -i '+movie_file+'_t'+desired_format+' -vf drawtext=\\\"fontfile=/opt/X11/share/fonts/TTF/VeraMoBd.ttf: timecode=\\'00\\:00\\:00\\:00\\':rate=30: [email protected]: x=7: y=460\\\" -an -y '+movie_file+'_tt'+desired_format\n args = shlex.split(cmd)\n #print args\n p = subprocess.Popen(args, shell=False)\n p.wait()\n\n os.remove(movie_file+'_t'+desired_format)\n\n return new_movie_file",
"def _spawn_ffmpeg(self) -> None:\r\n if self.ffmpeg_proc is not None:\r\n raise RuntimeError('_spawn_ffmpeg called when ffmpeg_proc is '\r\n + f'{self.ffmpeg_proc} (not None)')\r\n\r\n args = ['ffmpeg', '-f', 'rawvideo', '-vcodec', 'rawvideo',\r\n '-s', f'{self.frame_size[0]}x{self.frame_size[1]}',\r\n '-pix_fmt', 'rgba', '-r', str(self.fps),\r\n '-loglevel', 'quiet',\r\n '-i', 'pipe:0',\r\n '-vcodec', 'h264', '-pix_fmt', 'yuv420p',\r\n '-movflags', '+faststart']\r\n\r\n if self.bitrate > 0:\r\n args.extend(['-b', f'{self.bitrate}k'])\r\n args.extend(['-y', self.outfile])\r\n\r\n create_flags = sp.CREATE_NO_WINDOW if 'nt' in os.name else 0\r\n self.ffmpeg_proc = sp.Popen(args, shell=False, stdout=None, stderr=None,\r\n stdin=sp.PIPE, creationflags=create_flags)",
"def start_recording(codec, filename=time.strftime(\"%Y-%m-%d_%H-%M-%S\")):\n global video_writer\n folder = 'video_out/' # eventually replace this with the SD card folder\n # TODO: also include branch name and/or commit ID\n path = folder + filename + '.' + filetype\n print \"Saving video to: %s\" % path\n\n height = videoinput.frame_height\n if settings.sidebyside:\n width = 2*videoinput.frame_width\n else:\n width = videoinput.frame_width\n\n try:\n video_writer = cv2.VideoWriter(path, codec, 30, (width, height))\n except:\n print \"Failed to open video file for writing!\"",
"def ffmpeg(*options):\n\tffmpeg_command = [\"ffmpeg\"] + list(options)\n\tprint(\"Calling FFMPEG:\", \" \".join(ffmpeg_command))\n\n\tprocess = subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE)\n\t(cout, cerr) = process.communicate()\n\texit_code = process.wait()\n\tif exit_code != 0: #0 is success.\n\t\traise Exception(\"Calling FFmpeg failed with exit code {exit_code}. CERR: {cerr} . COUT: {cout}\".format(exit_code=exit_code, cerr=str(cerr), cout=str(cout)))",
"def save_video(foldername, songname, songlen, num_steps, output):\n num_steps_by_len = num_steps / songlen\n p = subprocess.Popen(['ffmpeg', '-f', 'image2', '-r', str(num_steps_by_len), '-i', '%d.png', '-c:v', 'libx264', '-pix_fmt', 'yuv420p', '-vf', 'pad=ceil(iw/2)*2:ceil(ih/2)*2', 'movie.mp4'], cwd=foldername)\n p.wait()\n\n p = subprocess.Popen(['ffmpeg', '-i', 'movie.mp4', '-i', '../audio_files/' + songname + '.mp3', '-map', '0:v', '-map', '1:a', '-c', 'copy', output], cwd=foldername)\n p.wait()",
"def make_video(input_files, width=0, height=0, frame_rate=24, crf=20, output_path=\"video.mp4\"):\n if isinstance(input_files, list):\n from PIL import Image # pylint: disable=C0415\n\n with Image.open(input_files[0]) as img:\n width, height = img.size\n tmp_dir = \"tmp_ffmpeg_dir\"\n os.mkdir(tmp_dir)\n if width % 2 != 0:\n print(f\"Width ({width}) not divisible by 2\")\n width -= 1\n if height % 2 != 0:\n print(f\"Height ({width}) not divisible by 2\")\n height -= 1\n for i, inp in enumerate(input_files):\n shutil.copy(inp, os.path.join(tmp_dir, f\"{i:06d}.png\"))\n inputs = f\"{tmp_dir}/%06d.png\"\n command = ffmpeg_common_args(frame_rate, inputs, width, height, crf, output_path)\n ret = os.system(command)\n assert ret == 0, \"ffmpeg failed to generate video file.\"\n for i in range(len(input_files)):\n os.remove(os.path.join(tmp_dir, f\"{i:06d}.png\"))\n os.rmdir(tmp_dir)\n elif isinstance(input_files, str):\n assert width != 0 and height != 0\n command = ffmpeg_common_args(frame_rate, input_files, width, height, crf, output_path)\n ret = os.system(command)\n assert ret == 0, \"ffmpeg failed to generate video file.\"\n else:\n assert (\n False\n ), f'input_files should be list (of files) or str (of file template, e.g., \"%04d.png\") instead of {type(input_files)}'",
"def make_video(pattern, plotdir, moviedir, movienametag):\n images_list = glob('%s/%s'%(plotdir, pattern))\n images_list.sort()\n # save all required files into tmp_moviedir, with simple filenames: %.4d.png\n tmp_moviedir = '%s/tmp_movie_%s'%(plotdir, movienametag)\n os.system('mkdir -p %s'%tmp_moviedir)\n for i in range(len(images_list)):\n fname = images_list[i].split('%s/'%plotdir)[-1].split('.png')[0]\n os.system('cp %s/%s.png %s/%.4d.png'%(plotdir, fname, tmp_moviedir, i))\n\n os.system('avconv -i %s'%tmp_moviedir +'/%04d.png ' \\\n +' -y -c:v libx264 -pix_fmt yuv420p %s/%s.mp4'%(moviedir, movienametag))",
"def convert_to_mp3(filename: str, title: str, start: int=None, end: int=None) -> list:\n\t# setup args for ffmpeg\n\tfile_a = f\"{path_to_wrk_dir}{filename}.mp4\" # input file\n\tfile_b = f\"{path_to_wrk_dir}{title}.mp3\" # output file\n\tfiles_b = [] # this list need if file more than 30 mb\n\targs = [\n\t\t\"/usr/bin/ffmpeg\", # path to ffmpeg\n\t\t\"-i\", # flag for input file\n\t\tfile_a, # input file\n\t\t\"-acodec\", # setup codec\n\t\t\"libmp3lame\", # codec name\n\t\t]\n\n\t# now need setup timings for target encode\n\tif start is not None and start != 0:\n\t\targs = args + [\"-ss\", str(start)]\n\tif end is not None and end != 0:\n\t\targs = args + [\"-t\", str(end - start)]\n\n\t# and last part for args to ffmpeg\n\targs = args + [\n\t\t\"-metadata\", # setup metadata for file\n\t\tf\"title={title}\", # title\n\t\t\"-metadata\",\n\t\tf\"artist={title}\", # and artist\n\t\t\"-b:a\", # setup bitrate\n\t\t\"320k\", # setup max bitrate\n\t\tfile_b,\n\t\t]\n\tprint(f\"{args}\")\n\t# start subprocess for encoding\n\tpopen = subprocess.Popen(args)\n\tpopen.wait()\n\n\t# check size file. if he more than 30 mb, bot need split him to chunks.\n\tsize = getsize(file_b) / 1024 / 1024\n\tif size > 30 and ( start or end is None ):\n\t\t# setup args for split to chunks\n\t\targs = [\n\t\t\t\"ffprobe\",\n\t\t\t\"-show_entries\",\n\t\t\t\"format=duration\",\n\t\t\t\"-i\",\n\t\t\tfile_b,\n\t\t\t]\n\n\t\t# get duration video.\n\t\tpopen = subprocess.Popen(args, stdout=subprocess.PIPE)\n\t\tpopen.wait()\n\t\toutput = popen.stdout.read()\n\t\t# now we know how long this audio file\n\t\t# split to 10 min chunks\n\t\tdur = re.findall(r\"\\d{1,10}\", str(output))\n\t\t# get chunks count for loop\n\t\tcount_chunks = (int(dur[0]) // 600) + 1\n\t\tfor chunk_start_time in range(0, count_chunks):\n\t\t\t# setup args for split\n\t\t\t# big parts of args the same for encode\n\t\t\targs = [\n\t\t\t\t\"/usr/bin/ffmpeg\",\n\t\t\t\t\"-i\",\n\t\t\t\tfile_b,\n\t\t\t\t\"-ss\",\n\t\t\t\tf\"{chunk_start_time * 600}\", # when start chunk\n\t\t\t\t\"-t\",\n\t\t\t\t\"600\", # 10 mints duration\n\t\t\t\t\"-acodec\",\n\t\t\t\t\"copy\", # copy\n\t\t\t\t\"-b:a\",\n\t\t\t\t\"320k\",\n\t\t\t\tf\"{path_to_wrk_dir}{title}_{chunk_start_time}.mp3\", # now we have path to video with chunk number.\n\t\t\t]\n\t\t\ttry:\n\t\t\t\t# start process for cut chunk\n\t\t\t\tpopen = subprocess.Popen(args, stdout=subprocess.PIPE)\n\t\t\t\tpopen.wait()\n\t\t\t# handle except.\n\t\t\texcept Exception as e:\n\t\t\t\tprint(f\"Exception - {e}\")\n\t\t\tfiles_b.append(f\"{path_to_wrk_dir}{title}_{chunk_start_time}.mp3\") # append name of file in list\n\t\tremove(file_b)\n\ttry:\n\t\t# remove tmp file\n\t\tremove(file_a)\n\t# handle except\n\texcept FileNotFoundError:\n\t\tfiles = get_file_list(path_to_wrk_dir)\n\t\tfor i in files:\n\t\t\tif -1 != f\"{path_to_wrk_dir}{i}\".find(f\"{filename}\") and f\"{i}\".find(f\".mp3\") == -1:\n\t\t\t\ttry:\n\t\t\t\t\tremove(f\"{path_to_wrk_dir}{i}\")\n\t\t\t\texcept FileNotFoundError:\n\t\t\t\t\tprint(f\"can't remove file {path_to_wrk_dir}{i}\")\n\tif len(files_b) == 0:\n\t\treturn [file_b]\n\telse:\n\t\treturn files_b",
"def seqIo_toVid(fName, ext='avi'):\n\n assert fName[-3:]=='seq', 'Not a seq file'\n sr = seqIo_reader(fName)\n N = sr.header['numFrames']\n h = sr.header['height']\n w = sr.header['width']\n fps = sr.header['fps']\n\n out = fName[:-3]+ext\n sw = skvideo.io.FFmpegWriter(out)\n # sw = cv2.VideoWriter(out, -1, fps, (w, h))\n timer = pb.ProgressBar(widgets=['Converting ', pb.Percentage(), ' -- ',\n pb.FormatLabel('Frame %(value)d'), '/',\n pb.FormatLabel('%(max)d'), ' [', pb.Timer(), '] ',\n pb.Bar(), ' (', pb.ETA(), ') '], maxval=N)\n\n for f in range(N):\n I, ts = sr.getFrame(f)\n sw.writeFrame(Image.fromarray(I))\n # sw.write(I)\n timer.update(f)\n timer.finish()\n # cv2.destroyAllWindows()\n # sw.release()\n sw.close()\n sr.close()\n print(out + ' converted')",
"def write_video(frames, filename, fps=20):\n \n # On Mac systems, copy ffmeg binaries to your PATH (http://ffmpegmac.net/)\n \n if platform.system() == 'Windows':\n err_str = 'Don\\'t know how to write a movie for %s platform' % platform.system()\n raise NotImplementedError(err_str)\n\n \n if len(frames.shape) == 4:\n pix_fmt = 'rgb24'\n else:\n pix_fmt = 'gray'\n \n # normalize\n max_pix_val = np.percentile(frames, 99.9)\n if frames.dtype in (np.bool, bool):\n frames = frames.astype(np.uint8)\n frames -= frames.min()\n frames[frames>max_pix_val] = max_pix_val\n if max_pix_val > 0:\n frames *= 255. / max_pix_val\n frames = frames.astype(np.uint8)\n \n # figure out which av program is installed\n program_name = ''\n try:\n subprocess.check_call(['avconv', '-h'], stdout=DEVNULL, stderr=DEVNULL)\n program_name = 'avconv'\n except OSError:\n try:\n subprocess.check_call(['ffmpeg', '-h'], stdout=DEVNULL, stderr=DEVNULL)\n program_name = 'ffmpeg'\n except OSError:\n pass\n if not program_name:\n raise OSError('Can\\'t find avconv or ffmpeg')\n \n # prepare pipe to av converter program\n size_str = '%ix%i' % (frames.shape[1], frames.shape[2])\n cmd = [program_name,\n '-y', # (optional) overwrite output file if it exists\n '-f', 'rawvideo',\n '-vcodec','rawvideo',\n '-s', size_str, # size of one frame\n '-pix_fmt', pix_fmt,\n '-r', str(fps), # frames per second\n '-i', '-', # input comes from a pipe\n '-an', # no audio\n '-qscale', '1',\n '-vcodec','mjpeg',\n filename]\n \n pipe = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=DEVNULL, stderr=subprocess.STDOUT)\n \n # write frames \n for frame in frames:\n frame = np.fliplr(frame)\n pipe.stdin.write(frame.tostring())\n pipe.stdin.close()\n pipe.wait()",
"def convert_files(enumerated_src_file):\n i, src_file = enumerated_src_file\n src_file = src_file.strip()\n file_extension, acodec, quality = audio_codec()\n\n dst_file = '.'.join(src_file.split('.')[:-1]) + file_extension\n sys.stdout.write(str(i + 1) + ': ' + src_file + ' -> ' + dst_file + '\\n')\n subprocess.call(['ffmpeg', '-i', src_file, '-vn', '-acodec',\n acodec, '-aq', quality, dst_file, '-loglevel', 'quiet'])\n return src_file",
"def process_video(input_file, output_file):\n # video = VideoFileClip(input_file).subclip(40,44) # from 38s to 46s\n video = VideoFileClip(input_file)\n annotated_video = video.fl_image(process_pipeline)\n annotated_video.write_videofile(output_file, audio=False)",
"def process_video(input_file, output_file):\n # video = VideoFileClip(input_file).subclip(40,44) # from 38s to 46s\n video = VideoFileClip(input_file)\n annotated_video = video.fl_image(process_pipeline)\n annotated_video.write_videofile(output_file, audio=False)",
"def main():\n print(\"This is a library for reading video sequences into python via ffmpeg. \")\n print(\"Provides the 'Video_Reader' iterator class. \")\n print(\"Requires ffmpeg be installed. \")"
] |
[
"0.67609626",
"0.6545024",
"0.64391136",
"0.6160051",
"0.6078338",
"0.60496306",
"0.6016545",
"0.6010689",
"0.5968558",
"0.59620136",
"0.5842451",
"0.583345",
"0.5804066",
"0.5784901",
"0.5773912",
"0.5729956",
"0.56486654",
"0.56443024",
"0.5613024",
"0.5551656",
"0.54755616",
"0.5473268",
"0.54720014",
"0.54596263",
"0.54592025",
"0.5432067",
"0.5398187",
"0.539504",
"0.539504",
"0.5384447"
] |
0.6554189
|
1
|
Get path to the PubChem template if it exists.
|
def _get_pubchem_template_path(self, het_id):
path = os.path.join(self.pubchem_templates, f"{het_id}.sdf")
return path if os.path.isfile(path) else ""
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_template_path(self):\n raise NotImplementedError()",
"def template_path(self) -> str:\n return self._values.get(\"template_path\")",
"def get_template(self, template):\n\n template_path = aj.config.data['email']['templates'].get(template, 'default')\n\n if template_path == 'default' or not os.path.isfile(template_path):\n template_path = DEFAULT_TEMPLATES[template]\n\n return template_path",
"def template_path(self):\n return self.get_config(\"templates\")",
"def getTemplateFile(fname):\n return os.path.join(Configurations.getTemplateDir(), fname)",
"def _get_template_filename(self):\n _format = self.cfg.get('mutations', 'format')\n if _format == 'pdf':\n tf = 'PDFTemplate.bt'\n elif _format == 'png':\n tf = 'PNG12Template.bt'\n\n module_dir = os.path.dirname(os.path.abspath(__file__))\n\n return os.path.join(module_dir, templates_dir, tf)",
"def _find_base_path(self):\n paths = [path for path, content in self._templates]\n if len(paths) == 1:\n return os.path.dirname(paths[0])\n return common_path_prefix(paths)",
"def get_template(name):\n found_dir = False\n pkg_dir = get_sitepackage_dirs()\n for pd in pkg_dir:\n if os.path.isdir(pd + '/lmdo'):\n found_dir = '{}/lmdo/local_template/{}'.format(pd, name)\n if os.path.isfile(found_dir):\n break\n else:\n found_dir = False\n \n if not found_dir:\n Oprint.warn('Template file {} is missing'.format(name), 'lmdo')\n\n return found_dir",
"def get_template_filename(template):\n config = read_config(SETTINGS_PATH)\n #String templates\n if (template in STRING_TEMPLATES):\n options = config.options(STRING_TEMPLATES_SECTION) \n for option in options:\n if (option==template):\n #Get root path for the templates\n root_path = config.get(TEMPLATES_SECTION,TEMPLATES_ROOT_PATH)\n #Get the strings path templates\n strings_path = config.get(STRING_TEMPLATES_SECTION,STRING_TEMPLATES_PATH)\n return join(root_path,strings_path),config.get(STRING_TEMPLATES_SECTION,option)",
"def get_template_from_path(path: str) -> str:\r\n path = path.replace(\"\\\\\", \"/\")\r\n return path",
"def template(self):\n return self.conf.get(\"template\", None)",
"def get_template_name(self):\n if self.template_name:\n return self.template_name\n\n if Path('_templates/global/WaitPage.html').exists():\n return 'global/WaitPage.html'\n return 'otree/WaitPage.html'",
"def getPublishPath(self, filename):\n \n #recognize first\n if not self.isRecognized(filename): return None\n else:\n filename = Template(xpath(self.currentDataset,\n './/_:fileTemplate/text()', self.currentDatasetNs)).substitute(\\\n self.groupDict, hostname=self.hostname, SCIFLO_ROOT=self.scifloRoot)\n publishAtTpls = xpath(self.currentDataset,\n './/_:publishAt/_:location/_:data/text()',\n self.currentDatasetNs)\n if isinstance(publishAtTpls, (types.ListType, types.TupleType)):\n publishTpl = publishAtTpls[0]\n else: publishTpl = publishAtTpls\n publishAt = Template(publishTpl).substitute(self.groupDict,\n hostname=self.hostname, SCIFLO_ROOT=self.scifloRoot)\n return os.path.join(publishAt, filename)",
"def template_dir(self):\n return os.path.join(Config().template_dir(), 'platform')",
"def save_path(self):\n return self.template.manager.render_template_txt(self.path, self.template)",
"def _get_config_template(self, key):\n tmp_path = self._get_config_value('templates', 'path') + key\n return tmp_path",
"def determine_template_by_path(path):\n path = path.lstrip('/')\n\n path_chunks = re.split('\\/', path)\n if len(path_chunks) <= 1:\n return path\n else:\n \"\"\"\n For now be ignorant and just return the\n first entry of the list as the possible template\n name, so in fact we only have a 1 level deep structure\n \"\"\"\n return '_%s.html' % path_chunks[0]",
"def get_template_path(relative_path, **kwargs): # lint-amnesty, pylint: disable=unused-argument\n return relative_path",
"def __default_pptx_path(self):\n thisdir = os.path.split(__file__)[0]\n return os.path.join(thisdir, 'templates', 'default.pptx')",
"def template_path(name):\n template_dir = os.path.join(os.path.dirname(__file__), 'templates')\n return os.path.join(template_dir, (name + \".html\"))",
"def get_template_path(relative_path):\r\n\r\n if not is_request_in_microsite():\r\n return relative_path\r\n\r\n microsite_template_path = str(get_value('template_dir'))\r\n\r\n if microsite_template_path:\r\n search_path = os.path.join(microsite_template_path, relative_path)\r\n\r\n if os.path.isfile(search_path):\r\n path = '{0}/templates/{1}'.format(\r\n get_value('microsite_name'),\r\n relative_path\r\n )\r\n return path\r\n\r\n return relative_path",
"def get_path_from_template(path_template: str, path_type: PathType = PathType.AUTO) -> str:\r\n # automatically select path type depending on running OS\r\n if path_type == PathType.AUTO:\r\n if platform.system() == \"Windows\":\r\n path_type = PathType.WINDOWS\r\n elif platform.system() == \"Linux\":\r\n path_type = PathType.LINUX\r\n else:\r\n raise RuntimeError(\"Unknown platform\")\r\n\r\n path_template = path_template.replace(\"<USERNAME>\", get_user_name())\r\n\r\n # return correctly formatted path\r\n if path_type == PathType.WINDOWS:\r\n return str(pathlib.PureWindowsPath(path_template))\r\n elif path_type == PathType.LINUX:\r\n return str(pathlib.PurePosixPath(path_template))\r\n else:\r\n raise RuntimeError(\"Unknown platform\")",
"def template(self):\n template_names = self.get_template_names()\n if template_names:\n return template_names[0]\n return None",
"def path(self) -> Optional[str]:\n return pulumi.get(self, \"path\")",
"def path(self) -> Optional[str]:\n return pulumi.get(self, \"path\")",
"def path(self) -> Optional[str]:\n return pulumi.get(self, \"path\")",
"def getTmpTemplateFile(fname):\n return os.path.join(Configurations.getTmpTemplateDir(), fname)",
"def get_template(self, template):\n\n\n env = Environment(\n loader=FileSystemLoader('templates')\n )\n return env.get_template(template)",
"def _get_template(self):\n # Get templates and put them in the order of importance:\n # 1. template specified in \"modules.yaml\"\n # 2. template specified in a package directly\n # 3. default template (must be defined, check in __init__)\n module_system_name = str(self.module.__name__).split(\".\")[-1]\n package_attribute = \"{0}_template\".format(module_system_name)\n choices = [\n self.conf.template,\n getattr(self.spec.package, package_attribute, None),\n self.default_template, # This is always defined at this point\n ]\n # Filter out false-ish values\n choices = list(filter(lambda x: bool(x), choices))\n # ... and return the first match\n return choices.pop(0)",
"def _get_template(self):\n try:\n template_path = current_app.config.get('REPORT_TEMPLATE_PATH')\n template_code = Path(f'{template_path}/{self._get_template_filename()}').read_text()\n # substitute template parts\n template_code = self._substitute_template_parts(template_code)\n except Exception as err: # noqa: B902; just logging\n current_app.logger.error(err)\n raise err\n return template_code"
] |
[
"0.6953855",
"0.68982375",
"0.68708664",
"0.67688173",
"0.65419143",
"0.64124465",
"0.63890076",
"0.63717955",
"0.6362928",
"0.63347596",
"0.6195259",
"0.61605114",
"0.61545146",
"0.61206657",
"0.6091912",
"0.60857964",
"0.60805887",
"0.6065575",
"0.60610527",
"0.6056612",
"0.60344166",
"0.60231346",
"0.60086554",
"0.5999773",
"0.5999773",
"0.5999773",
"0.5994696",
"0.5989366",
"0.5982411",
"0.5970935"
] |
0.7834179
|
0
|
Counts number of collisions among all bonds. Can be used for estimations of how 'wrong' the depiction is.
|
def count_bond_collisions(self):
errors = 0
for i in range(0, len(self.bonds)):
for a in range(i + 1, len(self.bonds)):
result = self._intersection(self.bonds[i], self.bonds[a])
if result:
errors += 1
return errors
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def count_bonds(self):\n n = 0\n for bond in self.iter_bonds():\n n += 1\n return n",
"def count_bonds(self):\n n = 0\n for bond in self.iter_bonds():\n n += 1\n return n",
"def get_collisions(self) -> int:\n if len(self.obstacles) == 0:\n return 0\n collision_list = [ob.detect_collision(self.agent)\n for ob in self.obstacles]\n return sum(collision_list)",
"def get_collisions(self) -> int:\n if len(self.obstacles) == 0:\n return 0\n collision_list = [ob.detect_collision(self.agent)\n for ob in self.obstacles]\n\n return sum(collision_list)",
"def get_collisions(self) -> int:\n c = 0\n for o in self.obstacles:\n if not isinstance(o, Bomb):\n continue # only consider apples\n xy_diff = o.get_position()[:2] - self.agent.get_position()[:2]\n dist = np.linalg.norm(xy_diff)\n # obstacles are only active when they are visible...\n if o.is_visible and dist < self.detection_distance:\n o.update_visuals(make_visible=False)\n c += 1\n return c",
"def get_collisions(self) -> int:\n if len(self.obstacles) == 0:\n collision_list = []\n else:\n collision_list = [ob.detect_collision(self.agent)\n for ob in self.obstacles]\n return sum(collision_list)",
"def __numHeads(self):\n count = 1\n\n while (self.__coinFlip() == 1):\n count += 1\n return count",
"def get_number_of_bulls(self):\n list_of_bulls = [i for i, j in zip(self.puzzle, self.guess) if i == j]\n bulls = len(list_of_bulls)\n return bulls",
"def count_balls(self, **kwargs):\n return 0",
"def get_total_collisions(self):\n return self.count_collisions",
"def number_of_containing_bags(self) -> int:\n\n bag_count = 0\n for sub_bag_count, sub_bag_color in self.containing_bags:\n bag_count += sub_bag_count\n bag_count += (\n sub_bag_count * bag_registry[sub_bag_color].number_of_containing_bags\n )\n return bag_count",
"def numnems(self):\n count = 0\n for o in self._objs.values():\n count += len(o.netifs())\n return count",
"def nClumps(self):\n \n return len(self)",
"def total_num_bonds(self):\n return self.GetNumberOfBonds()",
"def _num_of_consolidated(self, observation):\n a = set(observation)\n b = set(np.arange(self.num_of_servers))\n intersect = b - a\n return len(intersect)",
"def _num_of_consolidated(self, observation):\n a = set(observation)\n b = set(np.arange(self.num_of_servers))\n intersect = b - a\n return len(intersect)",
"def lives_counter(self):\n count = 15\n for row in self.board:\n for column in row:\n if column == HITSHIP:\n count -= 1\n self.lives = count\n return self.lives",
"def depiction_score(self):\n\n collision_penalty = 1\n degenerated_penalty = 0.4\n\n bond_collisions = self.count_bond_collisions()\n degenerated_atoms = self.count_suboptimal_atom_positions(0.0, 0.5)\n\n score = (\n collision_penalty * bond_collisions\n + degenerated_penalty * degenerated_atoms\n )\n\n return round(score, 1)",
"def ball_num(self):\n counter = 0\n for i in range(0, 100):\n if self.cells[i].is_ball:\n counter += 1\n return int(counter)",
"def get_destroyed_ships_count(self):\n destroyed_ships_count = 0\n for row_index in range(self.rows):\n for column_index in range(self.columns):\n cell = self.grid[row_index][column_index]\n if cell.has_destroyed_ship():\n destroyed_ships_count += 1\n\n return destroyed_ships_count",
"def heavy_count(mol,idxs):\n count = 0\n for num, bonds in enumerate(mol.GetBonds()):\n if mol.GetBondWithIdx(num).GetBeginAtomIdx() == idxs:\n if mol.GetAtomWithIdx(mol.GetBondWithIdx(num).GetEndAtomIdx()).GetSymbol() != 'H':\n count += 1\n elif mol.GetBondWithIdx(num).GetEndAtomIdx() == idxs:\n if mol.GetAtomWithIdx(mol.GetBondWithIdx(num).GetBeginAtomIdx()).GetSymbol() != 'H':\n count += 1\n return count",
"def countEdges(self):\n n = 0\n for (hub, table) in self.totsupport.iteritems():\n n += len(table)\n return n",
"def obstacle_count(self):\n #scan area in front of robot\n self.scan()\n #Figure ot how many obstacles there were\n see_an_object = False\n count = 0",
"def buses_count(self):\n\n count = 0\n for line in self.__bus_dict.values():\n # for item in buses:\n count += len(line)\n return count",
"def count_atoms(self):\n n = 0\n for chain in self.iter_chains():\n n += chain.count_atoms()\n return n",
"def count_atoms(self):\n n = 0\n for chain in self.iter_chains():\n n += chain.count_atoms()\n return n",
"def count_liberties(self, x, y):\n return len(self.get_liberties(x, y))",
"def count_all_atoms(self):\n n = 0\n for chain in self.iter_chains():\n n += chain.count_all_atoms()\n return n",
"def no_locked_budgets(self) -> int:\n count = 0\n for budget in self.budgets.values():\n if budget.locked:\n count += 1\n return count",
"def get_number_of_cheeses(self):\n number = 0\n for i in range(len(self._stools)):\n number += len(self._stools[i])\n return number"
] |
[
"0.6844145",
"0.6844145",
"0.68411255",
"0.68388706",
"0.67286265",
"0.66780823",
"0.64739144",
"0.62883806",
"0.6247389",
"0.62287146",
"0.61868566",
"0.617473",
"0.6031441",
"0.60012114",
"0.5952148",
"0.5952148",
"0.5926391",
"0.5919017",
"0.5828075",
"0.5818269",
"0.58110774",
"0.5802668",
"0.57986635",
"0.57904416",
"0.5785508",
"0.5785508",
"0.57784915",
"0.5745833",
"0.5738232",
"0.57300115"
] |
0.7136189
|
0
|
Get batch generator `batch_generator` must define a `shape` property that returns the shape of generated sequences as a (n_samples, n_features) tuple. `batch_generator` must define a method called `get_steps_per_epoch` with the signature `def get_steps_per_epoch(self, protocol, subset)` that returns the number of batches to generate before ending an epoch. `batch_generator` may optionally define a method called `callbacks` with the signature `def callbacks(self, extract_embedding=None)` that is expected to return a list of Keras callbacks that will be added to the list of callbacks during training. This might come in handy in case the `batch_generator` depends on the internal state of the model currently being trained.
|
def get_generator(self, file_generator, batch_size=None, **kwargs):
raise NotImplementedError('')
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def fit_generator(self, generator: \"DataGenerator\", nb_epochs: int = 20, **kwargs) -> None:\n raise NotImplementedError",
"def gen_batch(self):\n batch_size = self.batch_size\n shuffle = self.shuffle\n data = np.array(self.sentences)\n\n data_size = len(data)\n num_batches_per_epoch = int((len(data) - 1) / batch_size) + 1\n while True:\n # shuffle the data at starting of each epoch\n shuffled_data = data\n if shuffle:\n shuffle_indices = np.random.permutation(np.arange(data_size))\n shuffled_data = data[shuffle_indices]\n \n for batch_num in range(num_batches_per_epoch):\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n yield self._format_samples(shuffled_data[start_index:end_index], self.max_length)\n\n if self.mode in ['train', \"pred\"]:\n break",
"def get_generators(patch_size, batch_size, preprocess_func, output_reshape_func, num_validation, train_processes,\n train_cache, train_data_dir='data/train/'):\n\n dirs = util.get_data_list(train_data_dir)\n labels = util.parse_labels_months()\n train_paths, validation_paths = util.train_validation_split(dirs, labels)\n # generate train batch loader\n train_data_loader = CTBatchLoader(train_paths, batch_size, patch_size, num_threads_in_multithreaded=1,\n preprocess_func=preprocess_func)\n\n train_transforms = get_train_transform(patch_size)\n train_data_generator = MultiThreadedAugmenter(train_data_loader, train_transforms, num_processes=train_processes,\n num_cached_per_queue=train_cache, seeds=None, pin_memory=False)\n\n # wrapper to be compatible with keras\n train_generator_keras = KerasGenerator(train_data_generator, output_reshapefunc=output_reshape_func)\n\n # generate validation batch loader\n valid_data_loader = CTBatchLoader(validation_paths, num_validation, patch_size,\n num_threads_in_multithreaded=1, preprocess_func=preprocess_func)\n valid_transforms = get_valid_transform(patch_size)\n valid_data_generator = MultiThreadedAugmenter(valid_data_loader, valid_transforms, num_processes=1,\n num_cached_per_queue=1, seeds=None, pin_memory=False)\n # wrapper to be compatible with keras\n valid_generator_keras = KerasGenerator(valid_data_generator, output_reshape_func, 1)\n\n return train_generator_keras, valid_generator_keras",
"def trainGenerator(self,):\n return tf.data.Dataset.from_generator(self.trainData, \\\n output_types=(tf.float32, tf.float32, tf.float32), \\\n output_shapes=(tf.TensorShape(self.config_model[\"input_shape\"]), \\\n tf.TensorShape(list(self.headoutput_shape[1:4]) + \\\n [len(self.anchor_boxes), \\\n 7+len(self.config_data[\"all_classes\"])]), \\\n tf.TensorShape([self.config_data[\"max_boxes_per_frame\"], 7]) \\\n ), )",
"def setGenerators(self):\n shape = (self.input_shape[0],self.input_shape[1])\n self.trainGen,self.validateGen = getBatchGenerators(self.batch_size,\n self.dataPath,\n shape,\n self.classMap,\n self.regression)",
"def get_generator_batch_size(self):\n\n return self.generator_batch_size",
"def train_epoch(model,\n\t \ttrain_generator,\n\t \toptimizer,\n\t \tcallback=None):\n model.train()\n for it, (batch_of_x, batch_of_y) in enumerate(train_generator):\n train_on_batch(model, batch_of_x, batch_of_y, optimizer)\n\n if callback is not None:\n callback(model)\n return",
"def batch_generator(batch_size, sequence_length,\n x_train_scaled, y_train_scaled, num_x_signals, num_y_signals, num_train):\n # Infinite loop.\n while True:\n # Allocate a new array for the batch of input-signals.\n x_shape = (batch_size, sequence_length, num_x_signals)\n x_batch = np.zeros(shape=x_shape, dtype=np.float16)\n\n # Allocate a new array for the batch of output-signals.\n y_shape = (batch_size, sequence_length, num_y_signals)\n y_batch = np.zeros(shape=y_shape, dtype=np.float16)\n\n # Fill the batch with random sequences of data.\n for i in range(batch_size):\n # Get a random start-index.\n # This points somewhere into the training-data.\n idx = np.random.randint(num_train - sequence_length)\n\n # Copy the sequences of data starting at this index.\n x_batch[i] = x_train_scaled[idx:idx + sequence_length]\n y_batch[i] = y_train_scaled[idx:idx + sequence_length]\n yield x_batch, y_batch\n # return x_batch, y_batch",
"def batch_generator(batch_size):\n\n # Infinite loop.\n while True:\n # Get a list of random indices for images in the training-set.\n idx = np.random.randint(100,size=batch_size)\n \n # Get the pre-computed transfer-values for those images.\n # These are the outputs of the pre-trained image-model.\n transf_values = np.array([transfer_values[_] for _ in idx])\n\n # For each of the randomly chosen images there are\n # at least 5 captions describing the contents of the image.\n # Select one of those captions at random and get the\n # associated sequence of integer-tokens.\n tokens = [caps_markedwords[_] for _ in idx]\n\n # Count the number of tokens in all these token-sequences.\n num_tokens = [len(t) for t in tokens]\n \n # Max number of tokens.\n max_tokens = np.max(num_tokens)\n # Pad all the other token-sequences with zeros\n # so they all have the same length and can be\n # input to the neural network as a numpy array.\n tokens_padded = pad_sequences(tokens,\n maxlen=max_tokens,\n padding='post',\n truncating='post')\n \n # Further prepare the token-sequences.\n # The decoder-part of the neural network\n # will try to map the token-sequences to\n # themselves shifted one time-step.\n decoder_input_data = tokens_padded[:, 0:-1]\n decoder_output_data = tokens_padded[:, 1:]\n\n # Dict for the input-data. Because we have\n # several inputs, we use a named dict to\n # ensure that the data is assigned correctly.\n x_data = \\\n {\n 'decoder_input': decoder_input_data,\n 'transfer_values_input': transf_values\n }\n\n\n # Dict for the output-data.\n y_data = \\\n {\n 'decoder_output': decoder_output_data\n }\n \n yield (x_data, y_data)",
"def trainingBatchGenerator(data_folder, image_shape):\n def get_batches_fn(batch_size):\n \"\"\"\n Create batches of training data\n :param batch_size: Batch Size\n :return: Batches of training data\n \"\"\"\n image_paths = glob(os.path.join(data_folder, 'image_2', '*.png'))\n label_paths = {\n re.sub(r'_(lane|road)_', '_', os.path.basename(path)): path\n for path in glob(os.path.join(data_folder, 'gt_image_2', '*_road_*.png'))}\n background_color = np.array([255, 0, 0])\n\n random.shuffle(image_paths)\n for batch_i in range(0, len(image_paths), batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i+batch_size]:\n gt_image_file = label_paths[os.path.basename(image_file)]\n\n image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n gt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)\n\n gt_bg = np.all(gt_image == background_color, axis=2)\n gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n\n images.append(image)\n gt_images.append(gt_image)\n\n yield np.array(images), np.array(gt_images)\n return get_batches_fn",
"def batch_train_generator(self, X, batch_size, seq_len):\n startidx = np.random.randint(0, len(X) - seq_len, batch_size)\n while True:\n batch_X = np.array([X[start:start + seq_len]\n for start in startidx])\n batch_y = np.array(\n [X[start:start + seq_len + self.config.shift] for start in startidx])\n batch_y = batch_y[:, -1]\n startidx = (startidx + seq_len) % (len(X) - seq_len)\n yield batch_X.reshape(batch_size, seq_len, 1), batch_y.reshape(batch_size, 1)",
"def get_data_generator(train_data, validation_data):\n\n def batch_generator(mode=\"train\", batch_size=100):\n assert mode in [\"train\", \"val\"], \"The mode should be in {train, val}.\"\n if mode == \"train\":\n data = train_data.copy()\n elif mode == \"val\":\n data = validation_data.copy()\n\n while True:\n indices = np.random.permutation(np.arange(len(data)))\n data = data[indices]\n\n for i in range(len(data) // batch_size):\n yield data[i * batch_size:(i + 1) * batch_size]\n\n return batch_generator",
"def nn_batch_generator(self, x_train):\n # Shuffle the batch\n np.random.seed(self.seed)\n shuffle_index = np.arange(np.shape(x_train)[0])\n np.random.shuffle(shuffle_index)\n x = x_train[shuffle_index, :]\n y = x_train[shuffle_index, :]\n\n # Iterate until making a full epoch\n counter = 0\n while 1:\n index_batch = shuffle_index[\n self.batch_size * counter : self.batch_size * (counter + 1)\n ]\n # Decompress batch\n x_batch = x[index_batch, :]\n y_batch = y[index_batch, :]\n counter += 1\n yield (np.array(x_batch), np.array(y_batch))\n\n # Stopping rule\n if counter >= self.number_of_batches:\n counter = 0",
"def generate_validation_batch(self):\n assert self.validation_dataset is not None\n assert self.data_tags is not None\n \n # Sample indices and get data\n index_array = np.random.choice(self.num_validation_samples, self.p.trainer.batch_size)\n return self.get_data_from_indices(self.validation_dataset, index_array)",
"def train_step(self, batch, generator):\n ##\n # Split into inputs and outputs\n ##\n\n input_frames = batch[:, :, :, :-3]\n gt_output_frames = batch[:, :, :, -3:]\n\n ##\n # Train\n ##\n\n feed_dict = self.build_feed_dict(input_frames, gt_output_frames, generator)\n\n _, global_loss, global_step, summaries = self.sess.run(\n [self.train_op, self.global_loss, self.global_step, self.summaries],\n feed_dict=feed_dict)\n\n ##\n # User output\n ##\n\n if global_step % c.STATS_FREQ == 0:\n print 'DiscriminatorModel: step %d | global loss: %f' % (global_step, global_loss)\n if global_step % c.SUMMARY_FREQ == 0:\n print 'DiscriminatorModel: saved summaries'\n self.summary_writer.add_summary(summaries, global_step)\n\n return global_step",
"def train_generator(batch_size):\n\n # reset gradients\n g_solver.zero_grad()\n\n # predict on fake data\n noise = torch.randn(batch_size, Z, 1, 1, device=device)\n prediction = C(G(noise)).view(-1)\n\n # perform back propagation\n # implemenation of loss learned from Pytorch functionality learned from https://wiseodd.github.io/techblog/2017/02/04/wasserstein-gan/\n loss = -torch.mean(prediction)\n loss.backward()\n\n # adjust weights\n g_solver.step()\n\n return loss, prediction.mean().item()",
"def predict_batch_generator(self):\n input = np.zeros((self.batch_size, self.max_seq_len,\n self.embedding_size))\n seq_lengths = np.zeros((self.batch_size), dtype=np.intp)\n unique_counts = np.zeros((self.batch_size), dtype=np.intp)\n i = 0\n\n fi = open(self.config.parsed_predict_file)\n sample_gen = self.predict_sample_generator(fi)\n self.load_embedding()\n\n for sequence, seq_length, unique_count in sample_gen:\n seq_lengths[i], unique_counts[i] = seq_length, unique_count\n if seq_lengths[i] > self.max_seq_len:\n seq_lengths[i] = self.max_seq_len\n sequence = sequence[:seq_lengths[i]]\n input[i, 0:seq_lengths[i], :] = self.embedding[sequence, :]\n\n i += 1\n\n if i == self.batch_size:\n yield input, seq_lengths, unique_counts\n input = np.zeros(\n (self.batch_size, self.max_seq_len,\n self.embedding_size)\n )\n i = 0\n\n if i < self.batch_size:\n yield input[:i, :, :], seq_lengths[:i], unique_counts[:i]\n\n fi.close()",
"def list_batch_kwargs_generators(self):\n generators = []\n\n if \"batch_kwargs_generators\" in self._datasource_config:\n for key, value in self._datasource_config[\n \"batch_kwargs_generators\"\n ].items():\n generators.append({\"name\": key, \"class_name\": value[\"class_name\"]})\n\n return generators",
"def fit(self, training_generator, dimension_train, val_generator, dimension_val):\n self.model.fit_generator(generator=training_generator,\n steps_per_epoch=dimension_train // self.batch_size,\n epochs=self.epochs,\n verbose=1,\n callbacks=self.cb,\n validation_data=val_generator,\n validation_steps=dimension_val//self.batch_size,\n class_weight=None,\n max_queue_size=10,\n workers=multiprocessing.cpu_count(),\n use_multiprocessing=False,\n shuffle=True,\n initial_epoch=0)",
"def fit_generator(self, generator, nb_epochs=20, **kwargs):\n from art.data_generators import KerasDataGenerator\n\n # Try to use the generator as a Keras native generator, otherwise use it through the `DataGenerator` interface\n if isinstance(generator, KerasDataGenerator) and not hasattr(self, 'defences'):\n try:\n self._model.fit_generator(generator.generator, epochs=nb_epochs, **kwargs)\n except ValueError:\n logger.info('Unable to use data generator as Keras generator. Now treating as framework-independent.')\n super(KerasClassifier, self).fit_generator(generator, nb_epochs=nb_epochs, **kwargs)\n else:\n super(KerasClassifier, self).fit_generator(generator, nb_epochs=nb_epochs, **kwargs)",
"def get_batch(self, batch_kwargs, batch_parameters=None) -> None:\n raise NotImplementedError",
"def get_batch_kwargs_generator(self, name):\n if name in self._batch_kwargs_generators:\n return self._batch_kwargs_generators[name]\n elif (\n \"batch_kwargs_generators\" in self._datasource_config\n and name in self._datasource_config[\"batch_kwargs_generators\"]\n ):\n generator_config = copy.deepcopy(\n self._datasource_config[\"batch_kwargs_generators\"][name]\n )\n else:\n raise ValueError(\n f\"Unable to load batch kwargs generator {name} -- no configuration found or invalid configuration.\"\n )\n generator = self._build_batch_kwargs_generator(**generator_config)\n self._batch_kwargs_generators[name] = generator\n return generator",
"def get_batch_inputs(self, inputs, batch_size=None):\n total_num = inputs.shape[0]\n batch_size = batch_size or self.batch_size\n for i in range(0, total_num, batch_size):\n yield inputs[i:i + batch_size]",
"def get_batch(self, data, batch_size=None):\n if batch_size is None:\n # randomly generate a batch for training\n batch_size = self.batch_size\n random_sample = True\n else:\n # convert the whole 'data' into a batch\n # useful in validation or testing\n random_sample = False\n encoder_size, decoder_size = self.encoder_size, self.decoder_size\n # encoder_size = max([len(encoder_input) for encoder_input, _ in data])\n # decoder_size = max([len(decoder_input) for _, decoder_input in data])\n (batch_encoder_inputs, batch_decoder_inputs,\n encoder_sequence_length, decoder_sequence_length) = [], [], [], []\n\n for sample_id in xrange(batch_size):\n if random_sample:\n encoder_input, decoder_input = random.choice(data)\n else:\n encoder_input, decoder_input = data[sample_id]\n encoder_sequence_length.append(len(encoder_input))\n # add 1 for _Go\n decoder_sequence_length.append(len(decoder_input) + 1)\n\n # Encoder inputs are padded.\n encoder_pad = ([data_utils.PAD_ID] *\n (encoder_size - len(encoder_input)))\n batch_encoder_inputs.append(encoder_input + encoder_pad)\n\n # Decoder inputs get an extra \"GO\" symbol, and are padded then.\n decoder_pad_size = decoder_size - len(decoder_input) - 1\n batch_decoder_inputs.append([data_utils.GO_ID] + decoder_input +\n [data_utils.PAD_ID] * decoder_pad_size)\n\n # Here the assumption is that data_utils._PAD = 0\n batch_targets = np.zeros([batch_size, decoder_size], dtype=np.int32)\n batch_weights = np.zeros([batch_size, decoder_size], dtype=np.float32)\n for length_idx in xrange(decoder_size):\n # Create target_weights to be 0 for targets that are padding.\n for batch_idx in xrange(batch_size):\n # We set weight to 0 if the corresponding target is a\n # PAD symbol.\n # The corresponding target is decoder_input shifted by\n # 1 forward.\n if length_idx < decoder_size - 1:\n batch_targets[batch_idx][length_idx] = \\\n batch_decoder_inputs[batch_idx][length_idx + 1]\n if (length_idx < decoder_size - 1 and\n batch_targets[batch_idx, length_idx] != data_utils.PAD_ID):\n batch_weights[batch_idx][length_idx] = 1.0\n return (batch_encoder_inputs, batch_decoder_inputs,\n batch_targets, batch_weights,\n encoder_sequence_length, decoder_sequence_length)",
"def validateGenerator(self,):\n return tf.data.Dataset.from_generator(self.validateData, \\\n output_types=(tf.float32, tf.float32, tf.float32), \\\n output_shapes=(tf.TensorShape(self.config_model[\"input_shape\"]), \\\n tf.TensorShape(list(self.headoutput_shape[1:4]) + \\\n [len(self.anchor_boxes), \\\n 7+len(self.config_data[\"all_classes\"])]), \\\n tf.TensorShape([self.config_data[\"max_boxes_per_frame\"], 7]) \\\n ), )",
"def predict_generator(self, generator, dimension_generator):\n return self.model.predict_generator(generator, steps=dimension_generator//self.batch_size, max_queue_size=10, workers=1,\n use_multiprocessing=True, verbose=0)",
"def get_batch(self, batch_size, preprocessing_fn=None, mode='training', drop_long_sequences=False):\n data_file_patterns = self.dataset.get_data_filepatterns(mode=mode)\n dataset_r = self.decoder.examples_reader([data_file_patterns], bool(mode == 'training'),\n self.capacity)\n batching_scheme = self._batching_scheme(\n batch_size=batch_size,\n max_length=self.max_length,\n min_length_bucket=self.min_bucket_length,\n length_bucket_step=self.length_bucket_step,\n drop_long_sequences=drop_long_sequences,\n shard_multiplier=self.shard_multiplier,\n length_multiplier=self.length_multiplier)\n\n with tf.name_scope(\"input_pipeline\"):\n if preprocessing_fn is not None:\n dataset_r = dataset_r.map(\n lambda ex: preprocessing_fn(ex, mode), num_threads=self.num_threads)\n dataset_r = dataset_r.filter(\n lambda ex: self._example_too_big(ex, batching_scheme[\"max_length\"]))\n\n dataset_r = self.bucket_by_sequence_length(\n dataset_r, self._example_length, batching_scheme[\"boundaries\"],\n batching_scheme[\"batch_sizes\"], batching_scheme[\"window_size\"])\n # We reshuffle the batches to prevent many long-sequence batches at once.\n if batching_scheme[\"shuffle_queue_size\"] is not None:\n dataset_r = dataset_r.shuffle(batching_scheme[\"shuffle_queue_size\"])\n batched_examples = dataset_r.make_one_shot_iterator().get_next()\n return batched_examples",
"def get_test_generator(patch_size, batch_size, preprocess_func, output_reshape_func, test_data_dir='data/test/'):\n\n test_paths = util.get_data_list(test_data_dir)\n\n # generate train batch loader\n test_data_loader = CTBatchLoader(test_paths, batch_size, patch_size, num_threads_in_multithreaded=1,\n preprocess_func=preprocess_func, infinite=False)\n\n # wrapper to be compatible with keras\n return KerasGenerator(test_data_loader, output_reshapefunc=output_reshape_func,\n n=int(len(test_data_loader.indices) / batch_size))",
"def create_batch(client, generator: DataGenerator):\n try:\n event_data_batch = client.create_batch()\n for device in generator.devices:\n # event_data_batch.add(EventData(gen.generate_payload(device)))\n event_data_batch.add(EventData(generator.generate_payload(device)))\n return event_data_batch\n except Exception as e:\n print(str(e))",
"def generator (self) -> tf.keras.Sequential:\n return self._generator"
] |
[
"0.62859094",
"0.627278",
"0.6238411",
"0.6231443",
"0.61449736",
"0.6116078",
"0.6102709",
"0.6071425",
"0.6071153",
"0.60535413",
"0.60365564",
"0.6031602",
"0.60167223",
"0.59738314",
"0.5962088",
"0.59487414",
"0.59423554",
"0.5910736",
"0.5903543",
"0.5894708",
"0.58606577",
"0.583326",
"0.58268565",
"0.582496",
"0.58238584",
"0.5815553",
"0.5812809",
"0.5807285",
"0.5804486",
"0.5787141"
] |
0.6673094
|
0
|
Extract embedding from internal Keras model
|
def extract_embedding(self, from_model):
    return from_model
|
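The document above simply hands back the object it is given, so, as an illustrative aside (not part of the corpus row), here is one common way to actually pull embedding weights out of a Keras model; the layer name "embedding" and the helper name are assumptions made for this sketch only.

import numpy as np
import tensorflow as tf

def get_embedding_weights(model: tf.keras.Model, layer_name: str = "embedding") -> np.ndarray:
    # get_layer() and get_weights() are standard Keras calls; an Embedding layer
    # stores its lookup table as its single weight matrix.
    return model.get_layer(layer_name).get_weights()[0]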
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def gensim_to_keras(model):\n return model.wv.get_keras_embedding()",
"def get_embedding(self, model):\n embedding = []\n for node in range(len(self.graph.nodes())):\n embedding.append(list(model[str(node)]))\n embedding = np.array(embedding)\n return embedding",
"def gensim_to_keras(model):\n layer = model.wv.get_keras_embedding()\n return (layer)",
"def get_embeddings() -> tuple:\n # Initialize the model loading Universal Sentense Encoder\n # into a KerasLayer from Kaggle dataset file\n model = tf.keras.Sequential(\n [KerasLayer(encoder_path, input_shape=[], dtype=tf.string,\n output_shape=[512], trainable=False),\n # tf.keras.layers.Layer(512, dtype=tf.float16) # To reduce memory footprint\n ]\n )\n\n train_emb = model.predict(data_train['text'])\n print('Train texts converted into embeddings. Shape:', train_emb.shape)\n\n test_emb = model.predict(data_test['text'])\n print('Test texts converted into embeddings. Shape:', test_emb.shape)\n\n return train_emb, test_emb",
"def concept_embedding(concept_model: ConceptDetectionModel2D):\n return concept_model.to_embedding()",
"def embed_word(self):\n return self.emb.get_keras_embedding(dropout = self.emb_dropout,\n trainable = self.trainable_emb,\n input_length = self.sent_maxlen)",
"def embed_word(self):\n return self.emb.get_keras_embedding(trainable = self.trainable_emb,\n input_length = self.sent_maxlen)",
"def pretrained_embedding_layer(model,model2,model3, word_to_index,emb_dim_max):\n words_ignored = []\n vocab_len = len(word_to_index) + 1 \n emb_matrix = np.zeros([vocab_len,emb_dim_max])\n \n print(' Total words would be processed : '+str(vocab_len))\n for word, idx in word_to_index.items():\n if word in model:\n emb_matrix[idx,:200] = model[word]\n emb_matrix[idx,200:] = 0\n if word in model2:\n emb_matrix[idx, :100] = model2[word]\n emb_matrix[idx, 100:] = 0\n if word in model3.keys():\n emb_matrix[idx,:] = model3[word]\n else:\n words_ignored.append(word)\n print(str(len(words_ignored))+\" words ignored\")\n print(emb_matrix.shape) \n \n \n embedding_layer = Embedding(vocab_len,emb_dim_max,trainable = True)\n \n # Build the embedding layer, it is required before setting the weights of the embedding layer. \n embedding_layer.build((None,)) # Do not modify the \"None\". This line of code is complete as-is.\n # Set the weights of the embedding layer to the embedding matrix. Your layer is now pretrained.\n embedding_layer.set_weights([emb_matrix])\n \n return embedding_layer,words_ignored",
"def model_extract_document_embedding(self):\n input_ids = tf.keras.layers.Input(shape=(self.maxlen,), dtype=tf.int32, name=\"ids\")\n attention_mask = tf.keras.layers.Input(shape=(self.maxlen,), dtype=tf.int32, name=\"att\")\n token = tf.keras.layers.Input(shape=(self.maxlen,), dtype=tf.int32, name=\"tok\")\n\n # Embedding :\n if self.method_embedding == 'CamemBERT':\n Camembert_model = transformers.TFCamembertModel.from_pretrained(\"jplu/tf-camembert-base\")\n x = Camembert_model(input_ids, attention_mask=attention_mask, token_type_ids=token)\n elif self.method_embedding == 'FlauBERT':\n # lr = 0.00001\n Flaubert_model = transformers.TFFlaubertModel.from_pretrained(\"jplu/tf-flaubert-base-uncased\")\n x = Flaubert_model(input_ids, attention_mask=attention_mask, token_type_ids=token)\n elif self.method_embedding == 'XLM-RoBERTa':\n # lr = 0.00001\n XLMRoBERTa_model = transformers.TFXLMRobertaModel.from_pretrained(\"jplu/tf-xlm-roberta-base\")\n x = XLMRoBERTa_model(input_ids, attention_mask=attention_mask, token_type_ids=token)\n elif self.method_embedding == 'RoBERTa':\n # Experience Test path weights :\n PATH = '/kaggle/input/tf-roberta/'\n config = transformers.RobertaConfig.from_pretrained(PATH + 'config-roberta-base.json')\n Roberta_model = transformers.TFRobertaModel.from_pretrained(PATH + 'pretrained-roberta-base.h5',\n config=config)\n # Sinon :\n # Roberta_model = transformers.TFRobertaModel.from_pretrained('roberta-base')\n x = Roberta_model(input_ids, attention_mask=attention_mask, token_type_ids=token)\n elif self.method_embedding == 'BERT':\n BERT_model = transformers.TFBertModel.from_pretrained('bert-base-uncased')\n x = BERT_model(input_ids, attention_mask=attention_mask, token_type_ids=token)\n else:\n logger.critical(\"unknown embedding method name : '{}'\".format(self.method_embedding))\n\n # word vectors shape : (None, maxlen, 768)\n x = x[0]\n cls_token = x[:, 0, :]\n\n model = tf.keras.models.Model(inputs=[input_ids, attention_mask, token], outputs=cls_token)\n return model",
"def embedding_layer(self):\n with tf.name_scope(\"Embedding_Layer\"):\n V_size = len(self.vocab)\n embed_dim = len(self.embed[0]) \n W_embed_ = tf.get_variable(\"W_embed\",shape=[V_size, embed_dim],trainable=False).assign(np.asarray(self.embed))\n W_analogy_embed_ = tf.get_variable(\"W_analogy_embed\",shape=[V_size, embed_dim],trainable=True,initializer=tf.random_uniform_initializer(minval=-1,maxval=1))\n return W_embed_, W_analogy_embed_",
"def _get_embedding_layer(self, input_data, doc_input_data):\n opts = self._options\n word_embedding = tf.Variable(tf.random_uniform((self.vocab_size, opts.embed_dim), -1.0, 1.0))\n embed = []\n\n temp = tf.zeros([opts.batch_size, opts.embed_dim])\n embed_d = []\n for n in range(opts.sentence_sample):\n temp = tf.add(temp, tf.nn.embedding_lookup(word_embedding, doc_input_data[:, n]))\n embed_d.append(temp)\n\n if opts.concat == 'True':\n combined_embed_vector_length = opts.embed_dim * opts.window_size + opts.embed_dim\n for j in range(opts.window_size):\n embed_w = tf.nn.embedding_lookup(word_embedding, input_data[:, j])\n embed.append(embed_w)\n embed.append(embed_d)\n else:\n combined_embed_vector_length = opts.embed_dim\n embed_w = tf.zeros([opts.batch_size, opts.embed_dim])\n for j in range(opts.window_size):\n embed_w += tf.nn.embedding_lookup(word_embedding, input_data[:, j])\n embed_w += embed_d\n embed.append(embed_w)\n\n return tf.concat(embed, 1), word_embedding, combined_embed_vector_length",
"def get_embeddings(model, loader, device=torch.device('cpu')):\n embeddings = []\n labels = []\n for item in loader:\n data, label = item\n data = data.view(-1, 1, data.shape[-1])\n data = data.to(device)\n label = label.to(device)\n output = model(data).squeeze(1)\n\n embedding = output.cpu().data.numpy()\n label = label.cpu().data.numpy()\n embeddings.append(embedding)\n labels.append(label)\n\n embeddings = np.array(embeddings)\n labels = np.array(labels)\n\n return embeddings, labels",
"def pretrained_embedding_layer(word_to_vec_map, word_to_index):\n \n vocab_size = len(word_to_index) + 1 # adding 1 to fit Keras embedding (requirement)\n any_word = list(word_to_vec_map.keys())[0]\n emb_dim = word_to_vec_map[any_word].shape[0] # define dimensionality of your GloVe word vectors (= 50)\n \n ### START CODE HERE ###\n # Step 1\n # Initialize the embedding matrix as a numpy array of zeros.\n # See instructions above to choose the correct shape.\n emb_matrix = np.zeros((vocab_size, emb_dim))\n \n # Step 2\n # Set each row \"idx\" of the embedding matrix to be \n # the word vector representation of the idx'th word of the vocabulary\n for word, idx in word_to_index.items():\n emb_matrix[idx, :] = word_to_vec_map[word]\n\n # Step 3\n # Define Keras embedding layer with the correct input and output sizes\n # Make it non-trainable.\n embedding_layer = tensorflow.keras.layers.Embedding(input_dim = vocab_size, output_dim = emb_dim, trainable = False)\n ### END CODE HERE ###\n\n # Step 4 (already done for you; please do not modify)\n # Build the embedding layer, it is required before setting the weights of the embedding layer. \n embedding_layer.build((None,)) # Do not modify the \"None\". This line of code is complete as-is.\n \n # Set the weights of the embedding layer to the embedding matrix. Your layer is now pretrained.\n embedding_layer.set_weights([emb_matrix])\n \n return embedding_layer",
"def __glove_embed__(sequence, model):\n embedded = []\n for word in sequence:\n embedded.append(model[word])\n return embedded",
"def _get_embedding(self, data):\n # Tensor(n, c)\n cat = data['cat']\n return self.one_hot_embed(cat)",
"def get_embed(input_data, vocab_size, embed_dim):\n embedding = tf.Variable(tf.random_uniform((vocab_size,embed_dim), -1, 1))\n embed = tf.nn.embedding_lookup(embedding, input_data)\n #print (\"embed_dim: \",embed_dim) # 向量表达维度为 256\n #print (\"input_data.shape: \",input_data.shape) # (50, 5)\n #print (\"embed.shap: \", embed.shape) # word 的向量表达 ==特征 (50, 5, 256) ==(batch_size, num_step, embed_dim)\n return embed # 返回input的向量表达",
"def embedding(self, images):\n predict = self.model.predict(images)\n return predict",
"def gen_embedding(text, model, tokenizer):\n ### Tokenize the texts\n encoded_input = tokenizer(text, padding=True, truncation=True, max_length=512, return_tensors='pt')\n \n ### Encode the tokenized data with model\n with torch.no_grad():\n model_output = model(**encoded_input)\n \n ### Pool the outputs into a single vector\n sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])\n return sentence_embeddings",
"def add_embedding(self):\n ### YOUR CODE HERE (~4-6 lines)\n embeddingTensor = tf.Variable(self.pretrained_embeddings)\n embeddings = tf.nn.embedding_lookup(embeddingTensor, self.input_placeholder)\n embeddings = tf.reshape(embeddings, [-1, self.max_length, Config.n_features * Config.embed_size])\n ### END YOUR CODE\n return embeddings",
"def bert_embed(data, bert_model, BATCH_SIZE = 16, MAX_LEN = 128):\n \n dataset = TensorDataset(\n data['input_ids'], data['attention_masks'], data['indices']\n )\n sampler = SequentialSampler(dataset)\n dataloader = DataLoader(dataset, sampler=sampler, batch_size=BATCH_SIZE)\n \n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n print('Running on ' + device.type)\n if device.type == 'cuda':\n bert_model.cuda() # put bert in training mode\n \n N = data['indices'].shape[0]\n X = np.zeros((N, 768))\n pos = 0\n for batch in dataloader:\n batch = tuple(t.to(device) for t in batch)\n b_input_ids, b_input_masks, b_indices = batch\n \n with torch.no_grad():\n embeddings = bert_model(\n b_input_ids.view(-1, MAX_LEN),\n b_input_masks.view(-1, MAX_LEN)\n )[2]\n # Take the mean of the last 4 hidden states\n embeddings = (embeddings[-4] + embeddings[-3] + embeddings[-2] + embeddings[-1])/4\n for j, label_ind in enumerate(b_indices.cpu().detach().numpy()):\n X[pos,:] = embeddings[j, int(label_ind), :].cpu().detach().numpy()\n pos+=1\n return X",
"def keras_model_fn(_, config):\n\n f = open(config[\"embeddings_path\"],encoding='utf8')\n glove = f.readlines()[:config[\"embeddings_dictionary_size\"]]\n f.close()\n\n embedding_matrix = np.zeros((config[\"embeddings_dictionary_size\"], config[\"embeddings_vector_size\"]))\n for i in range(config[\"embeddings_dictionary_size\"]):\n if len(glove[i].split()[1:]) != config[\"embeddings_vector_size\"]:\n continue\n embedding_matrix[i] = np.asarray(glove[i].split()[1:], dtype='float32')\n\n cnn_model = tf.keras.Sequential()\n cnn_model.add(layers.Embedding(weights=[embedding_matrix],\n input_dim=config['embeddings_dictionary_size'],\n output_dim=config['embeddings_vector_size'],\n input_length=config['padding_size']))\n cnn_model.add(layers.Conv1D(filters=100,kernel_size=2,padding='valid',activation='relu',strides=1))\n cnn_model.add(layers.GlobalMaxPooling1D())\n cnn_model.add(layers.Dense(100, activation='relu'))\n cnn_model.add(layers.Dense(1, activation = 'sigmoid'))\n cnn_model.compile(loss='binary_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n return cnn_model",
"def get_embedding_output(self):\n return self.embedding_output",
"def model(input_shape, model,model2,model3, word_to_index):\n \n ### START CODE HERE ###\n # Define sentence_indices as the input of the graph.\n # It should be of shape input_shape and dtype 'int32' (as it contains indices, which are integers).\n sentence_indices = Input(input_shape,dtype='int32')\n \n # Create the embedding layer pretrained with GloVe Vectors (≈1 line)\n embedding_layer, ignored_words = pretrained_embedding_layer(model,model2,model3,word_to_index,300)\n \n # Propagate sentence_indices through your embedding layer\n # (See additional hints in the instructions).\n embeddings = embedding_layer(sentence_indices)\n \n # Propagate the embeddings through an LSTM layer with 128-dimensional hidden state\n # The returned output should be a batch of sequences.\n X = LSTM(units=128,input_shape=input_shape,return_sequences=True)(embeddings)\n # Add dropout with a probability of 0.5\n X = Dropout(rate=0.5)(X)\n # Propagate X trough another LSTM layer with 128-dimensional hidden state\n # The returned output should be a single hidden state, not a batch of sequences.\n X = LSTM(units=128,input_shape=input_shape,return_sequences=False)(X)\n # Add dropout with a probability of 0.5\n X = Dropout(rate=0.5)(X)\n # Propagate X through a Dense layer with 5 units\n X = Dense(units=num_classes)(X)\n# X = Dense(6, activation='softmax')\n # Add a softmax activation\n# print(X)\n# print(type(X))\n# print(X.shape)\n# print(sum(X))\n X = Activation('softmax')(X)\n \n # Create Model instance which converts sentence_indices into X.\n model = Model(inputs=sentence_indices,outputs=X)\n \n return model",
"def _embed(self):\n with tf.variable_scope('word_embedding'):\n self.pretrained_word_mat = tf.get_variable(\"word_emb_mat\",\n [self.vocab.word_size() - 2, self.vocab.word_embed_dim],\n dtype=tf.float32,\n initializer=tf.constant_initializer(\n self.vocab.word_embeddings[2:],\n dtype=tf.float32),\n trainable=False)\n self.word_pad_unk_mat = tf.get_variable(\"word_unk_pad\",\n [2, self.pretrained_word_mat.get_shape()[1]],\n dtype=tf.float32,\n initializer=tf.constant_initializer(\n self.vocab.word_embeddings[:2],\n dtype=tf.float32),\n trainable=True)\n\n self.word_mat = tf.concat([self.word_pad_unk_mat, self.pretrained_word_mat], axis=0)\n self.p_emb = tf.nn.embedding_lookup(self.word_mat, self.p)\n self.q_emb = tf.nn.embedding_lookup(self.word_mat, self.q)",
"def _get_embedding(self, data):\n embedding_list = [super()._get_embedding(data)]\n context = data['context']\n for i in range(context.shape[1]):\n embedding_list.append(getattr(self, f'context{i}')(context[:, i:i+1]))\n return torch.cat(embedding_list, dim=1)",
"def test_extract_embeddings():\n with pytest.raises(OSError):\n model = BERTopic(bert_model='not_a_model')\n model._extract_embeddings([\"Some document\"])\n\n # model = BERTopic(bert_model='distilbert-base-nli-mean-tokens')\n # embeddings = model._extract_embeddings([\"Some document\"])\n #\n # assert isinstance(embeddings, np.ndarray)\n # assert embeddings.shape == (1, 768)",
"def get_embedding_model_params(self, output_dict):\n output_dict['model_params'] = self.trained_model_params",
"def get_embedding(self, resp):\n\n feed_dict = {self.anchor: resp}\n embedding = self.sess.run(self.embed_anchor, feed_dict=feed_dict)\n return embedding",
"def forward(self, input_sentence):\n sentence = self.word_embedding(input_sentence)\n embedding = self.encoder(sentence)\n return embedding",
"def pretrained_embedding_layer(word_to_vec_map, word_to_index):\n vocab_len = len(word_to_index) + 1 # adding 1 to fit Keras embedding (requirement)\n emb_dim = word_to_vec_map[\"cucumber\"].shape[0] # define dimensionality of your GloVe word vectors (= 50)\n emb_matrix = np.zeros((vocab_len, emb_dim)) # Initialize the embedding matrix as a numpy array of zeros of shape (vocab_len, dimensions of word vectors = emb_dim)\n for word, index in word_to_index.items(): # Set each row \"index\" of the embedding matrix to be the word vector representation of the \"index\"th word of the vocabulary\n emb_matrix[index, :] = word_to_vec_map[word]\n embedding_layer = Embedding(vocab_len, emb_dim, trainable = False) # Define Keras embedding layer with the correct output/input sizes, make it trainable. Use Embedding(...). Make sure to set trainable=False. \n embedding_layer.build((None,)) # Build the embedding layer, it is required before setting the weights of the embedding layer. Do not modify the \"None\".\n embedding_layer.set_weights([emb_matrix]) # Set the weights of the embedding layer to the embedding matrix. Your layer is now pretrained.\n return embedding_layer"
] |
[
"0.7617932",
"0.7358305",
"0.7260656",
"0.70529824",
"0.70419127",
"0.6889344",
"0.6849663",
"0.681078",
"0.6796582",
"0.67891294",
"0.6745876",
"0.66203314",
"0.6616796",
"0.659487",
"0.6581204",
"0.65696263",
"0.6557681",
"0.6531176",
"0.649195",
"0.6473366",
"0.6431787",
"0.64287907",
"0.6404737",
"0.63940465",
"0.6381103",
"0.63769853",
"0.63761353",
"0.637343",
"0.634846",
"0.63394153"
] |
0.77565044
|
0
|
Get a logger that produces reasonable output.
|
def _get_logger():
    logger = logging.getLogger(__name__)
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    ch.setFormatter(logging.Formatter("%(asctime)s [%(levelname)8s] %(message)s"))
    logger.addHandler(ch)
    logger.setLevel(logging.DEBUG)
    return logger
|
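As a hedged usage sketch for the row above (illustrative only, not taken from the corpus): wiring up the same handler and formatter by hand and emitting a couple of records shows what "reasonable output" looks like in practice.

import logging

logger = logging.getLogger(__name__)
ch = logging.StreamHandler()
ch.setFormatter(logging.Formatter("%(asctime)s [%(levelname)8s] %(message)s"))
logger.addHandler(ch)
logger.setLevel(logging.DEBUG)

logger.debug("debug detail")     # timestamped, level name padded to width 8
logger.info("pipeline started")

One design caveat worth noting: calling a factory like _get_logger() twice attaches a second StreamHandler to the same named logger, so every record is printed twice; guarding with `if not logger.handlers:` before adding the handler is a common fix.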
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_logger(name=\"unknown_logger\"):\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n handler = logging.StreamHandler(sys.stdout)\n handler.setLevel(logging.DEBUG)\n handler.setFormatter(FORMATTER)\n logger.addHandler(handler)\n logger.propagate = False # to avoid printing the same logs multiple times\n return logger",
"def _get_logger():\n return logging.Logger(__name__)",
"def get_logger():\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n formatter = logging.Formatter(fmt=\"%(asctime)s %(levelname)s %(name)s: %(message)s\",\n datefmt=\"%Y-%m-%d - %H:%M:%S\")\n if logger.hasHandlers():\n logger.handlers.clear()\n\n console = logging.StreamHandler(sys.stdout)\n console.setLevel(logging.INFO)\n console.setFormatter(formatter)\n\n logger.addHandler(console)\n\n return logger",
"def get_logger(name: str) -> logging.Logger:\n logger = logging.getLogger(name)\n logger.propagate = False\n logger.setLevel(logging.DEBUG)\n if not logger.handlers:\n handler = logging.StreamHandler(sys.stdout)\n handler.setFormatter(logging.Formatter(\"[%(asctime)s] %(message)s\"))\n logger.addHandler(handler)\n return logger",
"def get_logger(logger_name='default'):\n log = logging.getLogger(logger_name)\n log.setLevel(logging.DEBUG)\n log_format = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n\n ch = logging.StreamHandler(sys.stdout)\n ch.setFormatter(log_format)\n if log.hasHandlers():\n log.handlers.clear()\n log.addHandler(ch)\n\n return log",
"def get_logger():\r\n global logger\r\n \r\n if logger:\r\n return logger\r\n else:\r\n return create_logger()",
"def get_logger(name):\n log = logging.getLogger(name)\n # we don't set the logger's level to inherit from the parent logger.\n if log.handlers:\n return log\n fmt = logging.Formatter(LOG_FMT)\n shdlr = logging.StreamHandler()\n shdlr.setFormatter(fmt)\n log.addHandler(shdlr)\n log.propagate = False\n return log",
"def get_logger(name: str) -> logging.Logger:\n logger = logging.getLogger(name)\n logger.addHandler(logging.StreamHandler(sys.stdout))\n logger.setLevel(logging.DEBUG)\n return logger",
"def get_logger(name='default.log', level=logging.DEBUG):\n logger = logging.getLogger(name)\n logger.setLevel(level)\n hdlr = logging.StreamHandler()\n hdlr.setLevel(level)\n fmt = PrettyFormatter()\n hdlr.setFormatter(fmt)\n logger.addHandler(hdlr)\n return logger",
"def get_logger(name):\n logger = logging.getLogger(name)\n if not logger.handlers:\n logger.propagate = 1 # propagate to parent\n console = logging.StreamHandler()\n logger.addHandler(console)\n formatter = logging.Formatter(\n '%(name)s - [%(levelname)s] - %(message)s')\n console.setFormatter(formatter)\n return logger",
"def get_logger(log_name: str) -> logging.Logger:\n logger = logging.getLogger(log_name)\n handler = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter('%(asctime)s - %(name)s: %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n return logger",
"def logger() -> logging.Logger:\n return logging.getLogger(__name__)",
"def get_logger():\n logging.config.dictConfig(LOGGING_APPLICATION_CONF)\n logger = logging.getLogger(__name__)\n\n if not logger.handlers:\n logger.setLevel(logging.DEBUG)\n console_handler = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter(\n \"%(asctime)s— %(levelname)s —\\\n %(funcName)s:%(lineno)d — %(message)s\")\n console_handler.setFormatter(formatter)\n logger.addHandler(console_handler)\n return logger",
"def get_logger():\n return logging.getLogger(__name__)",
"def logger():\n return logging.getLogger(__name__)",
"def get_logger(name):\n\n logger = logging.getLogger(name)\n if not logger.handlers:\n out = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter(\n fmt='%(asctime)s - %(name)s - %(levelname)s \\\n - %(module)s - %(message)s'\n )\n out.setFormatter(formatter)\n logger.addHandler(out)\n logger.setLevel(get_config('LOGGING_LEVEL'))\n logger.propagate = False\n return logger",
"def _get_logger(verbose: bool = False) -> logging:\n logger = logging.getLogger() # root logger\n if verbose:\n logger.setLevel(logging.DEBUG)\n format_str = '%(asctime)s - %(levelname)-8s - %(module)s:%(funcName)-20s - %(message)s'\n else:\n logger.setLevel(logging.INFO)\n format_str = '%(message)s'\n\n date_format = '%Y-%m-%d %H:%M:%S'\n if os.isatty(2):\n color_format = '%(log_color)s' + format_str\n colors = {'DEBUG': 'green',\n 'INFO': 'reset',\n 'WARNING': 'bold_yellow',\n 'ERROR': 'bold_red',\n 'CRITICAL': 'bold_red'}\n formatter = colorlog.ColoredFormatter(color_format, date_format, log_colors=colors)\n else:\n formatter = logging.Formatter(format_str, date_format)\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n return logging.getLogger(__name__)",
"def get_logger(name: str):\n # setup logger\n logger = logging.getLogger(name)\n logger.setLevel(logging.INFO)\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n formatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n return logger",
"def get_logger(name):\n #### Configure Logger ####\n # Log to stdout\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n\n formatter = logging.Formatter('%(asctime)s - %(message)s',\n '%m/%d/%Y %H:%M:%S')\n ch = logging.StreamHandler(sys.stdout)\n ch.setLevel(logging.DEBUG)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n return logger",
"def get_logger(set_info=False):\n\n logging.basicConfig(format=\"%(message)s\", stream=sys.stdout)\n logger = logging.getLogger(\"pythonanywhere\")\n if set_info:\n logger.setLevel(logging.INFO)\n else:\n logger.setLevel(logging.WARNING)\n return logger",
"def get_logger(name=\"LazySusan\"):\n level = get_level()\n _configure(level)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n\n return logger",
"def get_logger(name: str) -> logging.Logger:\n logger = logging.getLogger(name)\n logger.setLevel(__lvl__)\n ch = logging.StreamHandler()\n ch.setLevel(__lvl__)\n preformat = f'[{logger.name}]'\n # [%(threadName)s/%(levelname)s] = [MainThread/INFO]\n ch.setFormatter(logging.Formatter(fmt=preformat + ' %(levelname)s [%(asctime)s] %(message)s',\n datefmt='%H:%M:%S'))\n logger.addHandler(ch)\n return logger",
"def _get_logger(self):\n return Logger(\"SLOTH\")",
"def create_logger():\r\n global logger\r\n logger = logging.getLogger(logger_name)\r\n\r\n formatter = logging.Formatter(fmt='%(asctime)s %(levelname)s %(message)s')\r\n \r\n handler = logging.StreamHandler()\r\n handler.setFormatter(formatter)\r\n logger.addHandler(handler)\r\n \r\n return logger",
"def get_logger(level=logging.INFO, quite=False, debug=False, to_file=''):\n assert level in [logging.DEBUG, logging.INFO, logging.WARNING, logging.CRITICAL]\n logger = logging.getLogger('main')\n formatter = logging.Formatter('%(asctime)s - %(funcName)s - %(levelname)s - %(message)s')\n if debug:\n level = logging.DEBUG\n logger.setLevel(level=level)\n if not quite:\n if to_file:\n fh = logging.FileHandler(to_file)\n fh.setLevel(level=level)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n else:\n ch = logging.StreamHandler()\n ch.setLevel(level=level)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n return logger",
"def _get_logger(title, verbose_lvl):\n\n logger = logging.getLogger(title)\n console = logging.StreamHandler()\n\n if verbose_lvl == 1:\n logger.setLevel(logging.INFO)\n console.setLevel(logging.INFO)\n elif verbose_lvl == 2:\n logger.setLevel(logging.DEBUG)\n console.setLevel(logging.DEBUG)\n else:\n logger.setLevel(logging.WARNING)\n console.setLevel(logging.WARNING)\n\n fmt = logging.Formatter(\n '%(asctime)s %(name)s %(levelname)s %(message)s')\n console.setFormatter(fmt)\n logger.addHandler(console)\n\n return logger",
"def get_console_logger(name=None):\n if name is None:\n name = os.path.splitext(os.path.basename(sys.argv[0]))[0]\n logger = logging.getLogger(name)\n\n # reset handlers\n logger.handlers = []\n sh = logging.StreamHandler()\n fmt = logging.Formatter(LOG_FMT)\n sh.setFormatter(fmt)\n logger.addHandler(sh)\n logger.setLevel(logging.INFO)\n\n return logger",
"def _logger(self):\n logger = logging.getLogger(self.NAME)\n logger.setLevel(self.LOG_LEVEL)\n shandler = logging.StreamHandler(sys.stdout)\n fmt = '\\033[1;32m%(levelname)-5s %(module)s:%(funcName)s():'\n fmt += '%(lineno)d %(asctime)s\\033[0m| %(message)s'\n shandler.setFormatter(logging.Formatter(fmt))\n logger.addHandler(shandler)\n return logger",
"def logger(self, name):\n logger, _ = get_stdout_logger(name, verbosity=self.verbosity)\n return logger",
"def get_logger():\n # Prepare log directory.\n try:\n os.mkdir('logs')\n except FileExistsError:\n pass\n\n # Create logger and formatter.\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s %(message)s')\n\n # Create and attach stream handler.\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n\n # Create and attach file handler.\n file_handler = logging.handlers.TimedRotatingFileHandler(\n 'logs/log.txt', when='d', encoding='utf-8')\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n\n return logger"
] |
[
"0.76173455",
"0.73903525",
"0.7385359",
"0.73532313",
"0.72620827",
"0.7261054",
"0.7208833",
"0.72087556",
"0.7140604",
"0.7138828",
"0.7128172",
"0.71156687",
"0.7111992",
"0.7104806",
"0.70627075",
"0.70546305",
"0.70511794",
"0.7048386",
"0.70350707",
"0.7021459",
"0.6994875",
"0.6994218",
"0.69697547",
"0.6959803",
"0.6941394",
"0.69319063",
"0.6930942",
"0.69300723",
"0.6914437",
"0.68929183"
] |
0.76066315
|
1
|
import count or FPKM table
|
def import_countOrFPKMTable(
        self, filename_I):
    # import and format the data
    io = base_importData();
    io.read_tab(filename_I);
    countOrFPKMTable = self.format_countOrFPKMTable(io.data);
    return countOrFPKMTable;
|
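For readers without the project-specific base_importData helper used in the document above, a minimal stand-alone sketch of the same import step, assuming pandas is acceptable and that the table is tab-delimited (as Cufflinks count/FPKM tables are); the function name is hypothetical.

import pandas as pd

def read_count_or_fpkm_table(path: str) -> pd.DataFrame:
    # Tab-separated table; the first row is taken as the header
    # (typically tracking_id plus one column per sample).
    return pd.read_csv(path, sep="\t")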
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def count(self):\n ans = self.execute(self.commands.table_count(self.name))\n return ans[0][0]",
"def count():",
"def samples(app, args):\n engine = create_engine(args.datafile)\n meta = MetaData()\n meta.reflect(engine)\n print(\"\\t\".join([str(x).replace('counts.', '')\n for x in meta.tables['counts'].columns\n if not x == 'counts.index']))",
"def produce_mirna_single_counts_table(count_files, table_file):\n merge_count_tables(count_files, table_file, \".dedup.single.bam.mirbase_counts.txt\")",
"def get_table_count(table_name, query, headers, base_url, maxpagesize):\n logging.info(\"Running get_table_count() . . . \")\n\n #task_instance = context['task_instance']\n #headers = task_instance.xcom_pull('build_auth_headers', key='auth_headers')\n\n r_count = requests.get('{0}/ws/schema/table/{1}/count?{2}'.format(base_url, table_name, query), headers=headers)\n r_status = r_count.status_code\n if r_status != 200:\n logging.info('Response NOT successful. I got code {} '.format(r_status))\n raise ValueError('Response NOT successful. I got code {} '.format(r_status))\n else:\n logging.info('Response successful! I got code {} '.format(r_status))\n\n count_json = r_count.json()\n row_count = count_json['count']\n\n pages = int(math.ceil(row_count / maxpagesize))\n\n return row_count, pages",
"def count_entries(self, tablename):\n query = \"Select count(*) from \" + tablename\n try:\n self.__cur.execute(query)\n except Exception as e:\n self.__conn.rollback()\n raise e\n fetcheddata = self.__cur.fetchone()\n return fetcheddata[0]",
"def db_print_table_rows_cnt(db_path, table_name):\n path_exist = os.path.exists(db_path)\n if path_exist is False:\n print '!!!Error, database does not exist.'\n return\n\n try:\n with db.connect(db_path) as conn:\n cursor = conn.cursor()\n print(\" Table Name : '%s'\" % table_name)\n # Prepare and execute SQL statement\n sql = ('SELECT COUNT(*) FROM {}').format(table_name)\n cursor.execute(sql)\n count = cursor.fetchall()\n print(\" Total Rows : %s\" % count[0][0])\n except (db.OperationalError) as e:\n print(\"!!!Error, %s\" % repr(e))",
"def db_print_table_rows_cnt(db_path, table_name):\n path_exist = os.path.exists(db_path)\n if path_exist is False:\n print '!!!Error, database does not exist.'\n return\n\n try:\n with db.connect(db_path) as conn:\n cursor = conn.cursor()\n print(\" Table Name : '%s'\" % table_name)\n # Prepare and execute SQL statement\n sql = ('SELECT COUNT(*) FROM {}').format(table_name)\n cursor.execute(sql)\n count = cursor.fetchall()\n print(\" Total Rows : %s\" % count[0][0])\n except (db.OperationalError) as e:\n print(\"!!!Error, %s\" % repr(e))",
"def test_table_counts():\n number_of_test_run = 2 # Run the pipeline twice\n for i in range(number_of_test_run):\n dp = DataPipeline()\n dp.run()\n\n dp = DataPipeline()\n assert dp.get_product_count() == (500000,)\n assert dp.get_duplicate_count(from_table=\"products\") == (0,)\n assert dp.get_aggregate_table_result_count() == (222024, )\n 222024\n dp.close()",
"def countTable(self, in_table_name):\n self.cursor.execute('SELECT COUNT(*) FROM {};'.format(in_table_name))\n return self.cursor.fetchone()[0]",
"def count(self, query):",
"def exp_calculator_with_count(count_table_file):\n count_table = pd.read_table(count_table_file, index_col=0)\n columns = count_table.columns\n\n gene_len = count_table[columns[0]]\n rpkm_dict = dict()\n tpm_dict = dict()\n for sample in columns[1:]:\n # Divide the read counts by the length of each gene in kilobases.\n # This gives you reads per kilobase (RPK)\n rpk = count_table[sample]/gene_len\n # get rpkm/fpkm\n total_counts = sum(count_table[sample])/1000\n \"\"\"\n rpkm = (count_table[sample]/gene_len)/(sum(count_table[sample])/1000)*1000000\n \"\"\"\n rpkm = rpk/total_counts*1000000\n # get tpm\n norm_gene_len_total_counts = sum(rpk)\n tpm = rpk/norm_gene_len_total_counts*1000000\n \"\"\"\n tpm = (count_table[sample]/gene_len)/sum(count_table[sample]/gene_len)*1000000\n \"\"\"\n # save\n rpkm_dict[sample] = rpkm\n tpm_dict[sample] = tpm\n # save results\n df_rpkm = pd.DataFrame(rpkm_dict, index=count_table.index)\n df_tpm = pd.DataFrame(tpm_dict, index=count_table.index)\n df_rpkm.to_csv(count_table_file+'.fpkm.xls', sep='\\t')\n df_tpm.to_csv(count_table_file+'.tpm.xls', sep='\\t')\n #\n return rpkm_dict, tpm_dict",
"def produce_mirna_allbest_counts_table(count_files, table_file):\n merge_count_tables(count_files, table_file, \".dedup.bam.mirbase_counts.txt\")",
"def print_tables(db):\n # connect to the database and create a cursor\n\n # select all columns using SQL command\n # 'SELECT * FROM StatelessCountByCountry'\n\n # print the data from StatelessCountByCountry\n\n # select all columns using SQL command\n # 'SELECT * FROM StatelessCountByRegion'\n\n # print the data from StatelessCountByRegion",
"def getCountFiles():\n result = 0\n session = Queries.createSession()\n try:\n result = session.execute(func.count(FileTable.id)).fetchone()[0]\n except sqlalchemy.exc.ArgumentError:\n print 'SQLAlchemy ERROR: Invalid or conflicting function argument is supplied'\n except sqlalchemy.exc.CompileError:\n print 'SQLAlchemy ERROR: Error occurs during SQL compilation'\n finally:\n session.close()\n return result",
"def get_source_records_count(self, tap_type, table):\n run_query_method = getattr(self, f'run_query_{tap_type.lower()}')\n result = run_query_method(f'SELECT count(1) FROM {table}')\n return result[0][0]",
"def get_counts(filename, key):\r\n column_keys, get_data = get_csv(filename)\r\n assert(key in column_keys[1:])\r\n column = column_keys[1:].index(key)\r\n print 'getcounts() %s : %s column = %d' % (filename, key, column+1) \r\n counts_dict = {}\r\n for i,(k,v) in enumerate(get_data()):\r\n x = v[column]\r\n counts_dict[x] = counts_dict.get(x, 0) + 1\r\n return counts_dict",
"def test_b_count_id(self):\n storage = FileStorage()\n count = storage.count(Amenity)\n self.assertEqual(1, count)\n count = storage.count(State)\n self.assertEqual(1, count)\n count = storage.count(City)\n self.assertEqual(1, count)\n count = storage.count(User)\n self.assertEqual(1, count)\n count = storage.count(Place)\n self.assertEqual(1, count)\n count = storage.count(Review)\n self.assertEqual(1, count)",
"def init():\n try:\n database.CONN\n except Exception:\n database.init()\n print('Database connection established.')\n inputtools.init()\n outputtools.init()\n\n global _CONN\n global _NAME\n global _TEMP_NAME\n global _SIMPLECOUNT_COLUMNS\n global _UCR_INDICATOR_DICT\n \n _CONN = database.CONN\n _NAME = 'SimpleCount'\n _TEMP_NAME = f'Temp{_NAME}' \n _SIMPLECOUNT_COLUMNS = ['fk_simplecount_indicator', 'fk_simplecount_county', 'year', 'value']\n _UCR_INDICATOR_DICT = {\n 'domestic':1100,\n 'school':1120,\n 'hate':1130,\n 'acca': 1400,\n 'acsa':1401,\n 'ahsna':1402,\n 'adpa':1403,\n 'ameth':1404,\n 'ch':1410,\n 'rape':1411,\n 'rob':1412,\n 'aggba':1413,\n 'ach':1414,\n 'arape':1415,\n 'arob':1416,\n 'aaggba':1417,\n 'theft':1420,\n 'burg':1421,\n 'mvt':1422,\n 'arson':1423,\n 'atheft':1424,\n 'aburg':1425,\n 'amvt':1426,\n 'aarson':1427,\n 'htsex':1430,\n 'htserve':1431,\n 'ahtsex':1440,\n 'ahtserve':1441,\n }",
"def get_table_record_count(schema_name, table_name):\n sql = \"select count(*) AS 'COUNT' FROM {0}.{1} with(nolock);\"\n return fetch_row(sql.format(schema_name, table_name))",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def Count(self) -> int:",
"def fast_count(db, Model): # noqa\n return db.session.execute(\n 'SELECT n_live_tup FROM pg_stat_all_tables WHERE relname = :tablename',\n {'tablename': Model.__tablename__}\n ).scalar()",
"def load_fact_traffic_violations_count_agg(cur,code):\n cur.execute(code)",
"def load_status_table():",
"def table_stats(self, db, dest, kvargs, lines):\n if 'table' in kvargs:\n tables = [db.get_table(kvargs['table'])]\n else:\n tables = db.tables()\n options = kvargs.get('options','')\n done = False\n for table in db.tables():\n print(\"======================= {} =======================\".format(table.name))\n if 'dump' in options:\n print(\"schema dump:\")\n table.dump()\n print(\"\")\n if 'head' in options:\n print(\"First 5 records:\")\n for source_record in db.read_records_as_dicts(tablename=table.name, limit=5):\n print(source_record)\n print(\"\")\n # Compute single-variable stats on each of the variables\n sw = stopwatch().start()\n print(\"Computing statistics...\")\n stats = {}\n census_checksum = 0\n \n if self.spark_context:\n print(\"Using spark to read {} ... assuming first line has headings\".format(table.filename))\n sc = self.spark_context\n data = sc.textFile(table.filename)\n header = data.first() # extract the header\n stats = data.filter(lambda row:row!=header).map(table.parse_line_to_dict).reduce(stats_reducer)\n else:\n try:\n for source_record in db.read_records_as_dicts(tablename=table.name,limit=self.limit):\n if source_record['RECTYPE']=='P':\n census_checksum += census_person_polynominal(source_record)\n stats = stats_reducer(source_record, stats)\n except KeyboardInterrupt as e:\n print(\"*** KeyboardInterrupt at count: {}\".format(stats[':count']))\n done = True\n if stats:\n print(\"total records: {} speed: {:8.0f} records/sec\".format( stats[':count'], stats[':count']/sw.elapsed()))\n tt = tytable.ttable()\n tt.add_head(['variable','min','avg','max'])\n tt.set_col_alignment(1,tytable.ttable.RIGHT)\n tt.set_col_alignment(2,tytable.ttable.RIGHT)\n tt.set_col_alignment(3,tytable.ttable.RIGHT)\n for key in stats_variable_names(stats):\n try:\n tt.add_data([key, stats[key+\":min\"], stats[key+\":sum\"] / stats[':count'], stats[key+\":max\"]])\n except TypeError:\n tt.add_data([key, stats[key+\":min\"], \"\", stats[key+\":max\"]])\n print(tt.typeset(mode=tytable.TEXT))\n if census_checksum:\n print(\"Census checksum: {}\".format(census_checksum))\n print(\"\")\n if done:\n return True # had the keyboard abort\n return True",
"def getFileCount(self) -> int:\n ...",
"def test_get_table(self):\n my_conn = MySQL(*self.conn_params)\n inf_schema = my_conn.get_table('inf_schema') # GET TABLE example\n row_count = my_conn.engine.scalar(\n select([func.count('*')]).select_from(inf_schema)\n )\n # The select.columns parameter is not available in the method form of\n # select(), e.g. FromClause.select().\n # See https://docs.sqlalchemy.org/en/latest/core/selectable.html#\n # sqlalchemy.sql.expression.FromClause.select\n my_conn.engine.execute(\n select([inf_schema.c.table_name]).select_from(inf_schema))\n self.assertGreaterEqual(row_count, 100)"
] |
[
"0.6216667",
"0.59590936",
"0.5957887",
"0.5952389",
"0.5868071",
"0.58158195",
"0.57693267",
"0.57693267",
"0.5755721",
"0.5700496",
"0.56974876",
"0.5680915",
"0.56658834",
"0.5662107",
"0.56482613",
"0.56469244",
"0.5637406",
"0.559692",
"0.5541019",
"0.55354065",
"0.5535229",
"0.5535229",
"0.5535229",
"0.5535229",
"0.5512007",
"0.54988885",
"0.54971683",
"0.54693854",
"0.54447764",
"0.54443556"
] |
0.7532327
|
0
|
reformat attr tables into a dictionary for rapid alignment of attr table with tracking_id
|
def reformat_attrTable(
        self):
    # format into a dictionary of rows for quick aligning with the tracking_id
    if self.attrTable: attrTable = self.attrTable[:];
    else: attrTable = [];
    attrTable_dict = {};
    for row in attrTable:
        attrTable_dict[row['tracking_id']] = row;
    return attrTable_dict;
|
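A stand-alone sketch of the same indexing idea with hypothetical names (the document above does this as a method over self.attrTable): key each attribute row by its tracking_id so later alignment is a single dictionary lookup.

from typing import Any, Dict, List

def index_by_tracking_id(rows: List[Dict[str, Any]]) -> Dict[str, Dict[str, Any]]:
    # A later row with a duplicate tracking_id overwrites an earlier one,
    # matching the behaviour of the loop in the document above.
    return {row["tracking_id"]: row for row in rows}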
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _process_attrs(attrs):\n new_attrs = OrderedDict()\n for attr in attrs:\n col = attr\n if isinstance(attr, tuple):\n col, attr = attr\n # special cases\n if attr == 'class_name':\n attr = '__class__.__name__'\n if attr == 'repr':\n attr = repr\n new_attrs[col] = attr\n\n return new_attrs",
"def transform_attributes(attrs):\n transformed = {}\n for key, value in attrs.items():\n if key in [\"raw_message\", \"text\"]:\n transformed[\"raw_content\"] = value\n elif key in [\"diaspora_handle\", \"sender_handle\", \"author\"]:\n transformed[\"handle\"] = value\n elif key == \"recipient_handle\":\n transformed[\"target_handle\"] = value\n elif key == \"parent_guid\":\n transformed[\"target_guid\"] = value\n elif key == \"first_name\":\n transformed[\"name\"] = value\n elif key == \"image_url\":\n if \"image_urls\" not in transformed:\n transformed[\"image_urls\"] = {}\n transformed[\"image_urls\"][\"large\"] = value\n elif key == \"image_url_small\":\n if \"image_urls\" not in transformed:\n transformed[\"image_urls\"] = {}\n transformed[\"image_urls\"][\"small\"] = value\n elif key == \"image_url_medium\":\n if \"image_urls\" not in transformed:\n transformed[\"image_urls\"] = {}\n transformed[\"image_urls\"][\"medium\"] = value\n elif key == \"tag_string\":\n transformed[\"tag_list\"] = value.replace(\"#\", \"\").split(\" \")\n elif key == \"bio\":\n transformed[\"raw_content\"] = value\n elif key == \"searchable\":\n transformed[\"public\"] = True if value == \"true\" else False\n elif key == \"target_type\":\n transformed[\"entity_type\"] = DiasporaRetraction.entity_type_from_remote(value)\n elif key == \"remote_photo_path\":\n transformed[\"remote_path\"] = value\n elif key == \"remote_photo_name\":\n transformed[\"remote_name\"] = value\n elif key == \"status_message_guid\":\n transformed[\"linked_guid\"] = value\n transformed[\"linked_type\"] = \"Post\"\n elif key in BOOLEAN_KEYS:\n transformed[key] = True if value == \"true\" else False\n elif key in DATETIME_KEYS:\n try:\n # New style timestamps since in protocol 0.1.6\n transformed[key] = datetime.strptime(value, \"%Y-%m-%dT%H:%M:%SZ\")\n except ValueError:\n # Legacy style timestamps\n transformed[key] = datetime.strptime(value, \"%Y-%m-%d %H:%M:%S %Z\")\n elif key in INTEGER_KEYS:\n transformed[key] = int(value)\n else:\n transformed[key] = value or \"\"\n return transformed",
"def proc_attr(inp):\n dic = {}\n for att in inp.attrs.keys():\n if getattr(inp.attrs[att], \"dtype\", None) is None:\n dic[att] = inp.attrs[att]\n elif inp.attrs[att].dtype.char == 'S':\n dic[att] = [\n x.strip() for x in inp.attrs[att].tostring().decode('ascii').split(',')\n ]\n else:\n dic[att] = (\n inp.attrs[att][0]\n if isinstance(inp.attrs[att],np.ndarray) and\n inp.attrs[att].size==1\n else inp.attrs[att]\n )\n return dic\n pass",
"def _get_attribute_dic(self, attrs):\n attr_dic = {}\n for attr_pair in attrs:\n attr_dic[attr_pair[0]] = attr_pair[1]\n return attr_dic",
"def transform(attrs: dict) -> dict:\n\n pass",
"def attrs_to_dict(self, attrs):\n return {k: v for k, v in attrs}",
"def _checkTableAttr(self, attrs, prefix):\n if not attrs:\n return {}\n\n result = {}\n s = [] # we collect synthesized style in s\n for key, val in attrs.items():\n # Ignore keys that don't start with prefix\n if prefix and key[:len(prefix)] != prefix:\n continue\n key = key[len(prefix):]\n val = val.strip('\"')\n # remove invalid attrs from dict and synthesize style\n if key == 'width':\n s.append(\"width: %s\" % val)\n elif key == 'height':\n s.append(\"height: %s\" % val)\n elif key == 'bgcolor':\n s.append(\"background-color: %s\" % val)\n elif key == 'align':\n s.append(\"text-align: %s\" % val)\n elif key == 'valign':\n s.append(\"vertical-align: %s\" % val)\n # Ignore unknown keys\n if key not in self._allowed_table_attrs[prefix]:\n continue\n result[key] = val\n st = result.get('style', '').split(';')\n st = '; '.join(st + s)\n st = st.strip(';')\n st = st.strip()\n if not st:\n try:\n del result['style'] # avoid empty style attr\n except:\n pass\n else:\n result['style'] = st\n #logging.debug(\"_checkTableAttr returns %r\" % result)\n return result",
"def _convert_table_to_dict(self, data_table):\n column_names = ['star_name', 'distance', 'brightness', 'luminosity']\n stars = {}\n for line in data_table:\n stars[line[0]] = {column_names[i] : line[i] for i in range(1, len(column_names))}\n return stars",
"def aggregate_by_primary_attribute(table):\n result = {}\n for row in table:\n for attribute_to_aggregate_by in row[1].split(','):\n attribute_to_aggregate_by.strip()\n attribute_data = row[0]\n if attribute_to_aggregate_by not in result:\n result[attribute_to_aggregate_by] = [attribute_data]\n else:\n result[attribute_to_aggregate_by] += [attribute_data]\n return result",
"def convert_attributes(cls, attrs):\n return {}",
"def _prepare_links_table(self):\n\n links_tbl = OrderedDict()\n for colname in itertools.islice(self._pinfos, 1, None):\n links_tbl[colname] = {}\n links_tbl[colname][\"name\"] = f\"{colname}\"\n fname = colname.replace(\"%\", \"_pcnt\") + \".html\"\n links_tbl[colname][\"fname\"] = fname\n links_tbl[colname][\"hlink\"] = f\"<a href=\\\"{fname}\\\">{colname}</a>\"\n\n return links_tbl",
"def to_dict(self):\n d = {}\n i = 0\n for entry in self.entries:\n d[i] = {}\n attributes = self.get_attribute_list()\n print (attributes)\n for data in attributes:\n d[i][data] = entry.__getattribute__(data)\n i = i + 1\n return d",
"def attributes_metadata(self):\n\n attribute_meta = collections.defaultdict(dict)\n\n for attribute in self.attributes:\n attribute_meta[attribute.name]['valuemap'] = attribute.valuemap\n attribute_meta[attribute.name]['qualifiers'] = attribute.qualifiers\n\n return dict(attribute_meta)",
"def process_attrs(attrs):\n if attrs.getLength() == 0:\n return {}\n tmp_dict = {}\n for name in attrs.getNames():\n tmp_dict[name] = attrs.getValue(name)\n return tmp_dict",
"def get_attributes(self):\n \n retdict = {}\n retdict['s'] = str(self.s)\n if self.t != None:\n retdict['t'] = str(self.t)\n retdict['a'] = str(self.a)\n retdict['b'] = str(self.b)\n retdict['c'] = str(self.c)\n retdict['d'] = str(self.d)\n return retdict",
"def avail_table_to_dict(avail_data):\n avail_target = avail_data[\"TARGETID\"]\n avail_loc = avail_data[\"LOCATION\"]\n avail = dict()\n for lid, tgid in zip(avail_loc, avail_target):\n if lid in avail:\n avail[lid].append(tgid)\n else:\n avail[lid] = list([tgid])\n avail = {f: np.array(av) for f, av in avail.items()}\n return avail",
"def _createAttributeFormattingMap(self, scanf_list, reformat=True):\n\n order = []\n scanf_map = {}\n for entry in scanf_list:\n\n # grab attribute\n attribute = re.split('\\s', entry)[0]\n\n # add to order\n if attribute.startswith('_') or (not attribute in order):\n order.append(attribute)\n\n # reformat entry since sscanf doesn't support %g\n if reformat:\n entry = entry.replace('%g', '%f')\n\n # make format entry into list if multiple formats exist\n if attribute in scanf_map:\n formats = scanf_map[attribute]\n if not isinstance(formats, list):\n scanf_map[attribute] = [formats]\n scanf_map[attribute].append(entry)\n else:\n scanf_map[attribute] = entry\n\n return scanf_map, order",
"def tables() -> dict[str, str]:\n return {\n \"land_use\": \"zone_id\",\n \"tours\": \"tour_id\",\n \"trips\": \"trip_id\",\n \"persons\": \"person_id\",\n \"households\": \"household_id\",\n }",
"def _attrs_map(self) -> \"dict[int, str]\":\n return {i: attr.name for i, attr in enumerate(self._attrs())}",
"def make_category_tables(category_table):\n category2label = {}\n label2category = {}\n for item in category_table.itertuples():\n category_id = item[0]\n label_id = item[4]\n category2label[category_id] = label_id\n label2category[label_id] = category_id\n return category2label, label2category",
"def parse_attributes(self, attr):\n result = {}\n annotations = []\n # Sanitize and split attributes up\n split_attr = attr.strip(' \\t\\n;').split(';')\n for pair in split_attr:\n splitpair = pair.split('=')\n if len(splitpair) != 2:\n continue\n if splitpair[0] == \"ID\":\n result['identifier'] = splitpair[1]\n elif splitpair[0] == \"Name\":\n result['name'] = splitpair[1]\n elif splitpair[0] == \"Parent\":\n result['parent_id'] = splitpair[1]\n elif splitpair[0] == \"Dbxref\" or splitpair[0] == \"Ontology_term\":\n annotations.append(splitpair)\n # Make sure we found an ID\n if \"identifier\" not in result:\n return {}\n # Add annotations if we found any\n if annotations:\n result[\"annotations\"] = annotations\n return result",
"def attributes(table,attrs): \n if isinstance(table,Table):\n table.html_attributes = attrs\n return table",
"def insertable_dict(self):\n\n d = {p.key: getattr(self, p.key) for p in self.__mapper__.attrs if p.key not in ('table', 'stats', '_codes')}\n\n x = {('c_' + k).strip('_'): v for k, v in d.items()}\n\n return x",
"def alignAndReformat_countFPKMattrTables(\n self,analysis_id_I=None,sna2experimentID_I=None,\n sna2sns_I=None):\n #reformat\n countTable_flat = self.reformat_countTable(\n analysis_id_I=analysis_id_I,\n sna2experimentID_I=sna2experimentID_I,\n sna2sns_I=sna2sns_I,);\n fpkmTable_flat = self.reformat_fpkmTable(\n analysis_id_I=analysis_id_I,\n sna2experimentID_I=sna2experimentID_I,\n sna2sns_I=sna2sns_I,);\n attrTable_dict = self.reformat_attrTable();\n #align\n countAndFpkmTable_aligned = [];\n for row in countTable_flat[:]:\n row.update(attrTable_dict[row['tracking_id']]);\n countAndFpkmTable_aligned.append(row);\n for row in fpkmTable_flat[:]:\n row.update(attrTable_dict[row['tracking_id']]);\n countAndFpkmTable_aligned.append(row);\n return countAndFpkmTable_aligned;",
"def as_dict(self):\n return {c.key: getattr(self, c.key)\n for c in inspect(self).mapper.column_attrs}",
"def to_dict(self):\n columns = self.__mapper__.column_attrs.keys()\n result = {}\n for column in columns:\n result[column] = getattr(self, column)\n return result",
"def to_dict(self):\n columns = self.__mapper__.column_attrs.keys()\n result = {}\n for column in columns:\n result[column] = getattr(self, column)\n return result",
"def to_dict(self):\n columns = self.__mapper__.column_attrs.keys()\n result = {}\n for column in columns:\n result[column] = getattr(self, column)\n return result",
"def to_dict(self):\n columns = self.__mapper__.column_attrs.keys()\n result = {}\n for column in columns:\n result[column] = getattr(self, column)\n return result",
"def to_dict(self):\n columns = self.__mapper__.column_attrs.keys()\n result = {}\n for column in columns:\n result[column] = getattr(self, column)\n return result"
] |
[
"0.65014577",
"0.60592574",
"0.60217834",
"0.6018756",
"0.6007916",
"0.5968255",
"0.58305824",
"0.58305734",
"0.57943606",
"0.575547",
"0.57497114",
"0.574563",
"0.5674587",
"0.56742924",
"0.56551945",
"0.565404",
"0.56439865",
"0.5624873",
"0.561691",
"0.55270827",
"0.5517848",
"0.5494648",
"0.54904675",
"0.5484256",
"0.5468459",
"0.545606",
"0.545606",
"0.545606",
"0.545606",
"0.545606"
] |
0.8673553
|
0
|
reformat the count table into a flattened table of sample_names/values
|
def reformat_countTable(
        self, analysis_id_I=None, sna2experimentID_I=None,
        sna2sns_I=None):
    if self.countTable:
        countTable = self.countTable[:]
    else:
        countTable = []
    countTable_flat = self.reformat_countOrFPKMTable(
        countOrFPKMTable_I=countTable,
        analysis_id_I=analysis_id_I,
        sna2experimentID_I=sna2experimentID_I,
        sna2sns_I=sna2sns_I,
        count_or_FPKM='count')
    return countTable_flat
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def freq_table(a):\n Detail_freq = a.loc[:, (a.dtypes == object) | (a.dtypes == long) ].columns.get_values().tolist()\n print(Detail_freq)\n for freq in Detail_freq:\n df1 = pd.DataFrame(a[freq].value_counts(dropna=False).astype(float).map('{:20,.0f}'.format).sort_index()).rename(columns={freq:'Count'})\n df2 = pd.DataFrame(a[freq].value_counts(normalize = True, dropna=False).map('{:,.2%}'.format).sort_index()).rename(columns={freq:'Percentage'})\n df = pd.concat([df1, df2], axis = 1)\n print(df)",
"def write_count_table(filtered_counts, gene_names, ids_liver_header):\n with open(\"filtered_tcga_counts.tsv\", \"w\") as file:\n file.write(\"gene_id\\tgene_name\\t\" + \"\\t\".join(ids_liver_header) + \"\\n\")\n for gene_name, counts_line in zip(gene_names, filtered_counts):\n file.write(gene_name + \"\\t\" + \"\\t\" + \"\\t\".join(counts_line) + \"\\n\")",
"def __profile_to_table(data):\n output = [[\"condition\", \"mean\", \"min\", \"max\"]]\n order = data[\"order\"]\n\n for o in order:\n try:\n values = data[\"data\"][o]\n output.append(\n [o, str(mean(values)), str(min(values)), str(max(values))]\n )\n except Exception as e:\n print(e)\n\n return \"\\n\".join([\"\\t\".join(l) for l in output])",
"def reformat_countOrFPKMTable(\n self,\n countOrFPKMTable_I=None,\n analysis_id_I=None,\n sna2experimentID_I=None,\n sna2sns_I=None,\n count_or_FPKM = 'count'):\n #format into a dictionary of rows for quick aligning with the tracking_id\n countOrFPKMTable_flat = [];\n for row in countOrFPKMTable_I:\n for k,v in row.items():\n if k=='tracking_id':continue;\n tmp = {};\n tmp['analysis_id'] = analysis_id_I;\n tmp['tracking_id'] = row['tracking_id'];\n\n sample_name_lst = k.split('_');\n sample_name_base = '_'.join(sample_name_lst[:-1]);\n sample_name_rep = eval(sample_name_lst[-1]);\n if sna2experimentID_I: \n experiment_id = sna2experimentID_I[sample_name_base];\n else:\n experiment_id=None;\n tmp['experiment_id'] = experiment_id;\n if sna2sns_I: \n sample_name = sna2sns_I[sample_name_base][sample_name_rep];\n else:\n sample_name=k;\n tmp['sample_name'] = sample_name;\n\n tmp['value'] = v;\n tmp['value_units'] = count_or_FPKM;\n tmp['used_'] = True;\n tmp['comment_'] = None;\n countOrFPKMTable_flat.append(tmp);\n return countOrFPKMTable_flat;",
"def to_table(self):\n table = Table()\n table['THETA_LO'] = Quantity([self.counts.offset[:-1]], unit=self.counts.offset.unit)\n table['THETA_HI'] = Quantity([self.counts.offset[1:]], unit=self.counts.offset.unit)\n table['ENERG_LO'] = Quantity([self.counts.energy[:-1]], unit=self.counts.energy.unit)\n table['ENERG_HI'] = Quantity([self.counts.energy[1:]], unit=self.counts.energy.unit)\n table['counts'] = self.counts.to_table()['data']\n if self.counts.data_err is not None:\n table['counts_err'] = self.counts.to_table()['data_err']\n table['livetime'] = self.livetime.to_table()['data']\n table['bkg'] = self.bg_rate.to_table()['data']\n if self.bg_rate.data_err is not None:\n table['bkg_err'] = self.bg_rate.to_table()['data_err']\n table.meta['HDUNAME'] = \"bkg_2d\"\n return table",
"def format_unifrac_sample_mapping(sample_ids, otu_ids, otu_table_array):\r\n out = []\r\n for i, row in enumerate(otu_table_array):\r\n for j, val in enumerate(row):\r\n if val > 0:\r\n line = [otu_ids[i], sample_ids[j], str(val)]\r\n out.append('\\t'.join(line))\r\n return out",
"def table(x):\n c = Counter(x)\n return list(c), list(c.values())",
"def accumulate_entries_as_tables(entries):\n name_table = {}\n num_table = {}\n for number, name in entries:\n name_table[name] = number\n num_table[number] = name\n\n return name_table, num_table",
"def print_table(source, count=False):\n table_value = []\n table_header = []\n for source_key, source_value in source.items():\n for item in source_value:\n table_value.append([v for v in item.values()])\n table_header.append([k for k in item.keys()])\n if not count:\n print(tabulate(table_value,\n headers=table_header[0],\n tablefmt='orgtbl'))\n else:\n print(tabulate([[len(source_value)]],\n headers=[source_key],\n tablefmt='orgtbl'))",
"def write_count_matrix(pb_count, outfile, first=1):\n # write the header (PB names)\n print(\" \" + \"\".join([\"%6s\" % name for name in NAMES]), file=outfile)\n # write the data table\n for residue_idx, residue_pb in enumerate(pb_count):\n print(\"%-5d\" % (residue_idx + first) +\n \" \".join(\"%5d\" % i for i in residue_pb), file=outfile)",
"def summarize_sample_props(psd_list, sample_list):\n prop_list = [psd.sample_props for psd in psd_list]\n cols = ['amplicon median', 'mean size', 'lower size', 'upper size']\n\n return pd.DataFrame(prop_list, columns=cols, index=sample_list)",
"def get_split_summary_table(all_df, train_df, test_df):\n table = PrettyTable()\n table.field_names = ['set', 'N total', 'N non-ICH', 'N ICH', 'frac non-ICH', 'frac ICH']\n for df, name in zip([all_df, train_df, test_df],['All', 'Train', 'Test']):\n table.add_row([name, len(df), len(df[df.Hemorrhage == 0]), len(df[df.Hemorrhage == 1]),\n f'{len(df[df.Hemorrhage == 0])/len(df):.3%}', f'{len(df[df.Hemorrhage == 1])/len(df):.3%}'])\n return table",
"def generate_table(self, outtablename,\n cols=['A', 'B', 'AB'],\n generateTable=True):\n if generateTable:\n new_indices = ['time (s)', 'mean counts']\n for idx in self.data[cols].describe().index[2:]:\n new_indices.append(idx)\n outTable = self.data[cols].describe()\\\n .set_index(pd.Index(new_indices))\n outTable.to_latex(\n self.tables_dir + outtablename + \".tex\", float_format=\"%d\")\n print(\"Outtable: \", outTable)",
"def construct_table(self):\n table_str = self.header_row\n row_lbls, col_lbls = self.get_idxvals()\n for r,rlbl in enumerate(row_lbls):\n row_data = [self.data[rlbl,clbl] for clbl in col_lbls]\n table_str += self.construct_row(r, row_data)\n \n return table_str",
"def produce_mirna_single_counts_table(count_files, table_file):\n merge_count_tables(count_files, table_file, \".dedup.single.bam.mirbase_counts.txt\")",
"def summary():\n\n summary_result = session.query(Summary.Count, Summary.Total).all()\n session.close()\n\n # Return a List of Column Names (Sample Names)\n return jsonify(summary_result)",
"def usage_table_format(result):\n table = []\n for item in result:\n row = OrderedDict()\n row['Value'] = item['name']['localizedValue']\n row['Usage'] = item['currentValue'] or \"0\"\n row['Limit'] = item['limit'] or \"0\"\n table.append(row)\n return table",
"def format_df(df):\n df = df.transpose()\n df.rename(columns=df.iloc[0], inplace=True)\n df.drop('gene', inplace=True)\n df.index.rename('sample_id', inplace=True)\n return df",
"def format_df(df):\n df = df.transpose()\n df.rename(columns=df.iloc[0], inplace=True)\n df.drop('gene', inplace=True)\n df.index.rename('sample_id', inplace=True)\n return df",
"def num_54():\n frmt = \"\"\"\n :{}\n :Generate Data that conform to a uniform distribution.\n :\n :Class values: {}\n :Population size: {}\n :Results:\n : values:\n {}\n : table:\n {}\n : histogram: (class, frequency)\n {}\n :Then use NumPyArrayToTable to get your table.\n \"\"\"\n # import numpy as np\n st = 1\n end = 7\n vals = np.arange(st,end)\n reps = 10\n z = np.repeat(vals,reps)\n np.random.shuffle(z)\n ID = np.arange(len(z))\n tbl = np.array(list(zip(ID, z)), \n dtype = [('ID', 'int'), ('Class', 'int')])\n h = np.histogram(z, np.arange(st, end+1))\n h = np.array(list(zip(h[1], h[0])))\n pad = \" \"\n args =[num_54.__doc__, vals, reps*len(vals),\n indent(str(z.reshape(3,20)), pad),\n indent(str(tbl), pad), indent(str(h), pad)]\n print(dedent(frmt).format(*args))",
"def samples(app, args):\n engine = create_engine(args.datafile)\n meta = MetaData()\n meta.reflect(engine)\n print(\"\\t\".join([str(x).replace('counts.', '')\n for x in meta.tables['counts'].columns\n if not x == 'counts.index']))",
"def produce_mirna_unique_counts_table(count_files, table_file):\n merge_count_tables(count_files, table_file, \".dedup.uniq.bam.mirbase_counts.txt\")",
"def to_orange_table(samples):\n # Create table and fill it with sample data:\n table = []\n for sample in samples:\n table.append(_parse_sample_descriptor(sample.descriptor['sample']))\n\n # Create domain (header in table):\n header = [var[1]['type'].make(var[0]) for var in DATA]\n\n # It is necessary to provide all possible values for dicrete variable with\n # Iterate through all discrete variables in header:\n for head_, i in [(var, i) for i, (var, dat) in enumerate(zip(header, DATA)) if dat[1]['type'] == DiscreteVariable]:\n # Provide all possible values for discrete_var:\n head_.values = list(set([sample[i] for sample in table]))\n\n metas = [var[1]['type'].make(var[0]) for var in METAS]\n return Table(Domain(header, metas=metas), table)",
"def raw_counts(self):\n return np.array([[1, 2], [3, 4], [5, 6]])",
"def append_counting(dict):\n row_c = []\n # for nuc in NUC: #Scans all the elements and adds it to the table.\n # row_c.append(dict[nuc])\n for mot in MOT:\n row_c.append(dict[mot])\n for nuc_nr in NUC_NR :\n row_c.append(dict[nuc_nr + \"_NR\"])\n # #row.extend([dict[\"AA_NR\"], dict[\"TT_NR\"], dict[\"CC_NR\"], dict[\"GG_NR\"]])\n return row_c",
"def _get_table_from_samples(self, index):\n df = pd.DataFrame()\n for sample in self.samples:\n sd = sample.to_dict()\n ser = pd.Series(\n {k: v for (k, v) in list(sd.items()) if not k.startswith(\"_\")}\n )\n df = df.append(ser, ignore_index=True)\n index = [index] if isinstance(index, str) else index\n if not all([i in df.columns for i in index]):\n _LOGGER.debug(\n \"Could not set {} index. At least one of the \"\n \"requested columns does not exist: {}\".\n format(CFG_SAMPLE_TABLE_KEY, index))\n return df\n _LOGGER.debug(\"Setting sample_table index to: {}\".format(index))\n df.set_index(keys=index, drop=False, inplace=True)\n return df",
"def _create_counts(out_dts, out_dir):\n ma, ma_mirna = _merge(out_dts)\n out_ma = op.join(out_dir, \"counts.tsv\")\n out_ma_mirna = op.join(out_dir, \"counts_mirna.tsv\")\n ma.to_csv(out_ma, sep=\"\\t\")\n ma_mirna.to_csv(out_ma_mirna, sep=\"\\t\")\n return out_ma_mirna, out_ma",
"def count_frequency(df, count_columns: list, group_columns=['Fabric_name', 'Fabric_label'], margin_column_row:tuple=None):\n\n if margin_column_row and len(margin_column_row) == 2:\n if all([isinstance(element, bool) for element in margin_column_row]):\n # margin_column_row = ((False, False),) * len(count_columns)\n margin_column_row = (margin_column_row, ) * len(count_columns)\n\n # by default keep summary row but remove summary column\n if not margin_column_row:\n margin_column_row = ((False, True),) * len(count_columns)\n if len(count_columns) != len(margin_column_row):\n print('\\n')\n print('Parameters count_columns and margin_column_row in count_frequency function have different length')\n exit()\n\n index_lst = [df[column] for column in group_columns if column in df.columns and df[column].notna().any()]\n frequency_df = pd.DataFrame()\n\n for column, (margin_column, margin_row) in zip(count_columns, margin_column_row):\n if column in df.columns and df[column].notna().any():\n df[column].fillna(np.nan, inplace=True)\n current_df = pd.crosstab(index=index_lst, columns=df[column], margins=any((margin_column, margin_row)))\n current_df = current_df.sort_index()\n if any((margin_column, margin_row)):\n # drop column All\n if not margin_column:\n current_df.drop(columns=['All'], inplace=True)\n # drop row All\n if not margin_row:\n current_df.drop(index=['All'], inplace=True)\n if frequency_df.empty:\n frequency_df = current_df.copy()\n else:\n frequency_df = frequency_df.merge(current_df, how='outer', on=group_columns)\n\n frequency_df.fillna(0, inplace=True) \n frequency_df.reset_index(inplace=True) \n return frequency_df",
"def count_frequency(df, count_columns: list, group_columns=['Fabric_name', 'Fabric_label'], margin_column_row:tuple=None):\n\n if margin_column_row and len(margin_column_row) == 2:\n if all([isinstance(element, bool) for element in margin_column_row]):\n # margin_column_row = ((False, False),) * len(count_columns)\n margin_column_row = (margin_column_row, ) * len(count_columns)\n\n # by default keep summary row but remove summary column\n if not margin_column_row:\n margin_column_row = ((False, True),) * len(count_columns)\n if len(count_columns) != len(margin_column_row):\n print('\\n')\n print('Parameters count_columns and margin_column_row in count_frequency function have different length')\n exit()\n\n index_lst = [df[column] for column in group_columns if column in df.columns and df[column].notna().any()]\n frequency_df = pd.DataFrame()\n\n for column, (margin_column, margin_row) in zip(count_columns, margin_column_row):\n if column in df.columns and df[column].notna().any():\n df[column].fillna(np.nan, inplace=True)\n current_df = pd.crosstab(index=index_lst, columns=df[column], margins=any((margin_column, margin_row)))\n current_df = current_df.sort_index()\n if any((margin_column, margin_row)):\n # drop column All\n if not margin_column:\n current_df.drop(columns=['All'], inplace=True)\n # drop row All\n if not margin_row:\n current_df.drop(index=['All'], inplace=True)\n if frequency_df.empty:\n frequency_df = current_df.copy()\n else:\n frequency_df = frequency_df.merge(current_df, how='outer', on=group_columns)\n\n frequency_df.fillna(0, inplace=True) \n frequency_df.reset_index(inplace=True) \n return frequency_df",
"def encoding_labelcount(df, target=None):\n if not target:\n target = ['user_id', 'title']\n\n norm = round(\n df.shape[0] / 10000) # normalize the count by /per 100000 entries\n for col in target:\n df[col + '_labelcount'] = df[col].map(df[col].value_counts()) / norm\n df.drop([col], axis=1, inplace=True)\n return None"
] |
[
"0.57405555",
"0.56753397",
"0.56595165",
"0.56088334",
"0.558755",
"0.5580088",
"0.55345875",
"0.55330217",
"0.5509548",
"0.543421",
"0.5379833",
"0.5359173",
"0.53470004",
"0.52842575",
"0.52725303",
"0.5265805",
"0.5256941",
"0.52491903",
"0.52491903",
"0.52352023",
"0.52184176",
"0.5210343",
"0.5186094",
"0.5181094",
"0.5177044",
"0.51626515",
"0.5162241",
"0.516095",
"0.516095",
"0.51574534"
] |
0.627885
|
0
|
reformat the fpkm table into a flattened table of sample_names/values
|
def reformat_fpkmTable(
        self, analysis_id_I=None, sna2experimentID_I=None,
        sna2sns_I=None):
    if self.fpkmTable:
        fpkmTable = self.fpkmTable[:]
    else:
        fpkmTable = []
    fpkmTable_flat = self.reformat_countOrFPKMTable(
        countOrFPKMTable_I=fpkmTable,
        analysis_id_I=analysis_id_I,
        sna2experimentID_I=sna2experimentID_I,
        sna2sns_I=sna2sns_I,
        count_or_FPKM='fpkm')
    return fpkmTable_flat
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def format_unifrac_sample_mapping(sample_ids, otu_ids, otu_table_array):\r\n out = []\r\n for i, row in enumerate(otu_table_array):\r\n for j, val in enumerate(row):\r\n if val > 0:\r\n line = [otu_ids[i], sample_ids[j], str(val)]\r\n out.append('\\t'.join(line))\r\n return out",
"def format_df(df):\n df = df.transpose()\n df.rename(columns=df.iloc[0], inplace=True)\n df.drop('gene', inplace=True)\n df.index.rename('sample_id', inplace=True)\n return df",
"def format_df(df):\n df = df.transpose()\n df.rename(columns=df.iloc[0], inplace=True)\n df.drop('gene', inplace=True)\n df.index.rename('sample_id', inplace=True)\n return df",
"def from_multicsv(self,input_data):\n reformatted_data = []\n for (i,row) in enumerate(input_data):\n if i==0:\n headers = row\n else:\n data_row = {}\n for (j,h) in enumerate(headers):\n if j<len(row):\n data_row.update({h : row[j]})\n else:\n data_row.update({h : 0})\n reformatted_data.append(data_row)\n return reformatted_data",
"def convert_to_table_format(package):\n tables = list()\n for primary_table_id in package.primary_table_ids:\n tables.append(StachExtensions.__generate_table(package, primary_table_id))\n return tables",
"def print_tables(hash_table, f_output, l_samples):\n\n l_fields = ['chr', 'pos', 'ref', 'alt', 'QUAL', 'FILTER',\n 'Func.refGene', 'Gene.refGene', 'GeneDetail.refGene', 'ExonicFunc.refGene', 'AAChange.refGene',\n 'cytoBand', 'ExAC_ALL', 'ExAC_AFR', 'ExAC_AMR', 'ExAC_EAS', 'ExAC_FIN', 'ExAC_NFE', 'ExAC_OTH',\n 'ExAC_SAS',\n 'avsnp147', 'SIFT_score', 'SIFT_pred', 'Polyphen2_HDIV_score', 'Polyphen2_HDIV_pred',\n 'Polyphen2_HVAR_score',\n 'Polyphen2_HVAR_pred', 'LRT_score', 'LRT_pred', 'MutationTaster_score', 'MutationTaster_pred',\n 'MutationAssessor_score', 'MutationAssessor_pred', 'FATHMM_score', 'FATHMM_pred', 'PROVEAN_score',\n 'PROVEAN_pred', 'VEST3_score', 'CADD_raw', 'CADD_phred', 'DANN_score', 'fathmm-MKL_coding_score',\n 'fathmm-MKL_coding_pred', 'MetaSVM_score', 'MetaSVM_pred', 'MetaLR_score', 'MetaLR_pred',\n 'integrated_fitCons_score', 'integrated_confidence_value', 'GERP++_RS', 'phyloP7way_vertebrate',\n 'phyloP20way_mammalian', 'phastCons7way_vertebrate', 'phastCons20way_mammalian', 'SiPhy_29way_logOdds']\n l_fields = l_fields + l_samples\n \n l_chr = set([item[0] for item in hash_table.keys()])\n\n fo = open(f_output, 'w')\n fo.write(','.join(l_fields) + '\\n')\n for key in sorted(hash_table.keys(), key=itemgetter(1)):\n fo.write(','.join(map(lambda field: hash_table[key].get(field, '.'), l_fields)) + '\\n')\n fo.close()",
"def tree2OTU_table(mvp_tree):\n series = []\n for terminal in mvp_tree.feature_tree.get_terminals():\n try:\n series.append(terminal.sample_series)\n except:\n print('there is no sample series in tree2OTU ')\n df = pd.dataframe(series)\n return df",
"def to_frame(self):\n # Create a set of dictionaries/lists for each column\n data = dict([(i_var.name, []) for i_var in self.inputs])\n data.update({self.OUTPUT_LABEL: [], self.INPUT_LABEL: [], self.name: []})\n\n # A very ugly loop to produce all the probabilities in a nice way.\n # Note that this just reproduces what is already in `self.lookup`.\n # Honestly, I just haven't thought of a better way to get nice output.\n for i_index, i_state in enumerate(self.input_states):\n for o_var, results in zip(self.outputs, self.per_state_results):\n for o_state, o_p in enumerate(results[i_index]):\n for i_var, s in zip(self.inputs, i_state):\n data[i_var.name].append(s)\n data[self.OUTPUT_LABEL].append(o_var.name)\n data[self.INPUT_LABEL].append(o_state)\n data[self.name].append(o_p)\n all_data = pd.DataFrame(data=data)\n\n # The magnificent pivot table function does all the work\n return pd.pivot_table(data=all_data, values=[self.name],\n index=[i_var.name for i_var in self.inputs],\n columns=[self.OUTPUT_LABEL, self.INPUT_LABEL])",
"def test_toTable(self):\r\n # Empty results.\r\n out_f = StringIO()\r\n self.res1.toTable(out_f)\r\n self.assertEqual(out_f.getvalue(),\r\n \"SampleID\\tSize\\tEstimate\\tStd Err\\tCI (lower)\\tCI (upper)\\n\")\r\n out_f.close()\r\n\r\n # Results with multiple samples.\r\n exp = \"\"\"SampleID\\tSize\\tEstimate\\tStd Err\\tCI (lower)\\tCI (upper)\r\nS1\\t5\\t21\\t1.5\\t2.5\\t3.5\r\nS1\\t10\\t20\\t2.5\\t2.5\\t3.5\r\nS1\\t20\\t30\\t3.5\\t2.5\\t3.5\r\nS2\\t1\\t3\\t0.4\\t2.5\\t3.5\r\n\"\"\"\r\n out_f = StringIO()\r\n self.res2.toTable(out_f)\r\n self.assertEqual(out_f.getvalue(), exp)\r\n out_f.close()\r\n\r\n # Custom header.\r\n exp = \"\"\"foo\\tbar\\tbaz\\tbazaar\\tbazaaar\\tbazaaaar\r\nS1\\t5\\t21\\t1.5\\t2.5\\t3.5\r\n\"\"\"\r\n out_f = StringIO()\r\n self.res1.addSample('S1', 42)\r\n self.res1.addSampleEstimate('S1', 5, 21, 1.5, 2.5, 3.5)\r\n self.res1.toTable(out_f,\r\n header=['foo', 'bar', 'baz', 'bazaar', 'bazaaar', 'bazaaaar'])\r\n self.assertEqual(out_f.getvalue(), exp)\r\n out_f.close()\r\n\r\n # Invalid header.\r\n with self.assertRaises(ValueError):\r\n out_f = StringIO()\r\n self.res1.toTable(out_f, header=['foo'])\r\n\r\n # Cells with None as their value.\r\n exp = \"\"\"SampleID\\tSize\\tEstimate\\tStd Err\\tCI (lower)\\tCI (upper)\r\nS1\\t43\\tN/A\\tN/A\\tN/A\\tN/A\r\n\"\"\"\r\n out_f = StringIO()\r\n res = RichnessEstimatesResults()\r\n res.addSample('S1', 42)\r\n res.addSampleEstimate('S1', 43, None, None, None, None)\r\n res.toTable(out_f)\r\n self.assertEqual(out_f.getvalue(), exp)\r\n out_f.close()",
"def _transform_map_data(self):\n WARD_FMT = '%s-%s'\n self.map_data_trans = []\n lookup = {i.column: ''.join(filter(lambda x: x.isdigit(), i.value)) for i in self.sht[1]}\n\n #skip over header\n rs = iter(self.sht.rows)\n next(rs)\n next(rs)\n for r in rs:\n pka = r[0].value\n for c in r[1:]:\n if c.value is None:\n c.value = 0\n\n self.map_data_trans.append((WARD_FMT%(pka, lookup[c.column]), c.value))",
"def _finalize(self, sampled_data):\n final_data = dict()\n for table_name, table_rows in sampled_data.items():\n parents = self.metadata.get_parents(table_name)\n if parents:\n for parent_name in parents:\n foreign_key = self.metadata.get_foreign_key(parent_name, table_name)\n if foreign_key not in table_rows:\n parent_ids = self._find_parent_ids(table_name, parent_name, sampled_data)\n table_rows[foreign_key] = parent_ids\n\n reversed_data = self.metadata.reverse_transform(table_name, table_rows)\n\n fields = self.metadata.get_fields(table_name)\n\n final_data[table_name] = reversed_data[list(fields.keys())]\n\n return final_data",
"def reformat_data(self, df, ids):\n data = np.zeros((len(ids), self.n_sample_rows + 1, self.n_features))\n idx = 0\n for i in ids:\n sample = df.loc[i]\n data[idx, 0:89, :] = sample.values\n data[idx, 89, :] = np.mean(sample.values)\n idx += 1\n return data",
"def preprocessKNN(self):\n\n feature_list = []\n\n for index, row in self.all_data.iterrows():\n chans = cv2.split(row['image'])\n\n features = []\n for chan in chans:\n hist = cv2.calcHist(chan, [0], None, [64], [0,256])\n features.extend(hist)\n\n features = np.array(features).flatten()\n feature_list.append(features)\n\n df = self.all_data[['name', 'genre']].copy()\n\n feature_df = pd.DataFrame(feature_list)\n\n df = df.join(feature_df)\n\n return df",
"def get_normalized_data_table(table_metadata, debug=False):\n suffix = table_metadata.get('suffix', '')\n data_table = table_metadata['table_class'](\n file_path=table_metadata['csv_filename'], suffix=suffix)\n drop_headers(table_metadata['document_label'], data_table.data)\n rename_headers(table_metadata['document_label'], data_table.data)\n print_data_table_length(table_metadata['document_label'],\n data_table.data,\n debug=debug)\n return data_table",
"def __profile_to_table(data):\n output = [[\"condition\", \"mean\", \"min\", \"max\"]]\n order = data[\"order\"]\n\n for o in order:\n try:\n values = data[\"data\"][o]\n output.append(\n [o, str(mean(values)), str(min(values)), str(max(values))]\n )\n except Exception as e:\n print(e)\n\n return \"\\n\".join([\"\\t\".join(l) for l in output])",
"def _get_table_from_samples(self, index):\n df = pd.DataFrame()\n for sample in self.samples:\n sd = sample.to_dict()\n ser = pd.Series(\n {k: v for (k, v) in list(sd.items()) if not k.startswith(\"_\")}\n )\n df = df.append(ser, ignore_index=True)\n index = [index] if isinstance(index, str) else index\n if not all([i in df.columns for i in index]):\n _LOGGER.debug(\n \"Could not set {} index. At least one of the \"\n \"requested columns does not exist: {}\".\n format(CFG_SAMPLE_TABLE_KEY, index))\n return df\n _LOGGER.debug(\"Setting sample_table index to: {}\".format(index))\n df.set_index(keys=index, drop=False, inplace=True)\n return df",
"def test_format_unifrac_sample_mapping(self):\r\n a = [[1, 0, 0], [0, 2, 4], [7, 0, 9.0]]\r\n otu_ids = ['OTUa', 'OTUb', 'OTUc']\r\n sample_ids = ['Sa', 'Sb', 'Sc']\r\n result = format_unifrac_sample_mapping(sample_ids, otu_ids, a)\r\n self.assertEqual(\r\n result,\r\n ['OTUa\\tSa\\t1',\r\n 'OTUb\\tSb\\t2',\r\n 'OTUb\\tSc\\t4',\r\n 'OTUc\\tSa\\t7',\r\n 'OTUc\\tSc\\t9.0'])",
"def _format_data(self):\n formatted_data = []\n\n for row in self._data_agg_by_mean_value.iterrows():\n \n car_make = row[0]\n mean_car_value = round(row[1][0], 2)\n formatted_data.append({'car_make': car_make, 'mean_car_value': mean_car_value})\n\n return formatted_data",
"def reformat_countOrFPKMTable(\n self,\n countOrFPKMTable_I=None,\n analysis_id_I=None,\n sna2experimentID_I=None,\n sna2sns_I=None,\n count_or_FPKM = 'count'):\n #format into a dictionary of rows for quick aligning with the tracking_id\n countOrFPKMTable_flat = [];\n for row in countOrFPKMTable_I:\n for k,v in row.items():\n if k=='tracking_id':continue;\n tmp = {};\n tmp['analysis_id'] = analysis_id_I;\n tmp['tracking_id'] = row['tracking_id'];\n\n sample_name_lst = k.split('_');\n sample_name_base = '_'.join(sample_name_lst[:-1]);\n sample_name_rep = eval(sample_name_lst[-1]);\n if sna2experimentID_I: \n experiment_id = sna2experimentID_I[sample_name_base];\n else:\n experiment_id=None;\n tmp['experiment_id'] = experiment_id;\n if sna2sns_I: \n sample_name = sna2sns_I[sample_name_base][sample_name_rep];\n else:\n sample_name=k;\n tmp['sample_name'] = sample_name;\n\n tmp['value'] = v;\n tmp['value_units'] = count_or_FPKM;\n tmp['used_'] = True;\n tmp['comment_'] = None;\n countOrFPKMTable_flat.append(tmp);\n return countOrFPKMTable_flat;",
"def tsv_samples_to_names(self, name='samples_to_names.tsv'):\n with open(self.a.out_dir + name, 'w') as handle:\n content = self.df_sample_names.to_csv(None, sep=self.sep, float_format=self.float_format)\n handle.writelines(content)",
"def transform(filtered_list):\n out_put = {}\n out_list = []\n # loop to get the required columns, random ordered\n for item in filtered_list:\n for val in item._fields:\n if val in type_dict:\n out_put[val] = type_dict.get(val)(getattr(item, val))\n out_list.append(out_put)\n out_put = {}\n\n # loop to the ordered columns data as per output\n all_rows = []\n for item in out_list:\n tmp_row = []\n for key in type_dict.keys():\n out_put[key] = item[key]\n tmp_row.append(item[key])\n all_rows.append(tmp_row)\n\n col_row = [col.replace('_', '-') for col in type_dict.keys()]\n all_rows.insert(0, col_row)\n return all_rows",
"def per_sample_taxa_summaries(open_table, output_format):\n t = parse_biom_table(open_table)\n header = \"#taxon\\trelative_abundance\\n\"\n\n for v, id_, md in t.iter():\n with open(output_format % id_, 'w') as f:\n f.write(header)\n\n for sorted_v, taxa in \\\n sorted(zip(v, t.ids(axis='observation')))[::-1]:\n if sorted_v:\n f.write(\"%s\\t%f\\n\" % (taxa, sorted_v))",
"def generate_table(results):\n keyslist = list(results[0].keys())\n table = PrettyTable(keyslist)\n for dct in results:\n table.add_row([dct.get(c, \"\") for c in keyslist])\n return table",
"def _load_sample_table(self):\n self.sampleTable = pd.read_table(self.config['sampletable'], sep='\\t', dtype=str)\n self.sampleTable.set_index('sampleID', inplace=True)\n self.samples = self.sampleTable.reset_index().to_dict('records')",
"def transform(input):\n transformed_file = []\n\n for row in input:\n names = row['name'].split()\n row['fname'] = names[0]\n row['lname'] = names[1]\n del row['name']\n transformed_file.append(row)\n return transformed_file",
"def table_gen(NamesL_pairs, p_pL, m_mL, p_mL, m_pL, p_valsL, p_vals_BonferoniL, RatiosL, p_valsL_divergent_convergent,\n p_valsL_divergent_convergent_BonferoniL, RatiosL_divergent_convergent, output_table):\n datafile = open(output_table, \"w\")\n datafile.write(\n \"Feature_1\" + '\\t' + \"Feature_2\" + \"\\t\" + \"plus_plus\" + '\\t' + \"minus_minus\" + '\\t' + \"plus_minus\" + '\\t' + \"minus_plus\" + '\\t' + \"p_value_same_opposite\" + '\\t' + \"p-value_same_opposite_Bonferoni_corrected\" + '\\t' + \"Ratio_same_opposite\" + '\\t' + \"p_value_divergent_convergent\" + '\\t' + \"p_value_divergent_convergent Bonferoni corrected\" + '\\t' + \"Ratio divergent convergent\" + '\\n')\n for i in range(len(NamesL_pairs)):\n datafile.write(\n NamesL_pairs[i][0] + '\\t' + NamesL_pairs[i][1] + '\\t' + str(p_pL[i]) + '\\t' + str(m_mL[i]) + '\\t' + str(\n p_mL[i]) + '\\t' + str(m_pL[i]) + '\\t' + str(p_valsL[i]) + '\\t' + str(p_vals_BonferoniL[i]) + '\\t' + str(\n RatiosL[i]) + '\\t' + str(p_valsL_divergent_convergent[i]) + '\\t' + str(\n p_valsL_divergent_convergent_BonferoniL[i]) + '\\t' + str(RatiosL_divergent_convergent[i]) + '\\n')\n datafile.close()\n return",
"def split_otu_table_on_sample_metadata(otu_table_f, mapping_f, mapping_field):\r\n mapping_f = list(mapping_f)\r\n mapping_values = get_mapping_values(mapping_f, mapping_field)\r\n otu_table = parse_biom_table(otu_table_f)\r\n\r\n for v in mapping_values:\r\n v_fp_str = v.replace(' ', '_')\r\n sample_ids_to_keep = sample_ids_from_metadata_description(\r\n mapping_f, valid_states_str=\"%s:%s\" % (mapping_field, v))\r\n\r\n try:\r\n filtered_otu_table = otu_table.filterSamples(\r\n lambda values, id_, metadata: id_ in sample_ids_to_keep)\r\n except TableException:\r\n # all samples are filtered out, so no otu table to write\r\n continue\r\n yield v_fp_str, format_biom_table(filtered_otu_table)",
"def formatOutput(tranisitionTable):\n \n # TODO: Make all the processing done in this function, rather than main\n for key in sorted(transitionTable.iterkeys()):\n transitionTable[key].sort(key = operator.itemgetter(1)) #.sort is in-place\n \n return transitionTable",
"def make_table(ranked_means):\n fp = open(\"table.tex\", \"w\")\n fp.write(\"\"\"\\\\begin{tabular}{|l|c||l|c|}\n \\\\hline\n \\\\multicolumn{2}{|c||}{Slowest} & \\\\multicolumn{2}{|c|}{Fastest} \\\\\\\\ \\\\hline\n Feature & Rate & Feature & Rate \\\\\\\\ \\\\hline\n\"\"\")\n top_10 = ranked_means[0:10]\n bottom_10 = ranked_means[-10:]\n for ((f_rate, f_name),(s_rate,s_name)) in zip(top_10, bottom_10):\n f_name = f_name.split(\":\")[-1]\n f_name = f_name.rsplit(\" \", 1)[0] if f_name.endswith(\"(V)\") else f_name\n s_name = s_name.split(\":\")[-1]\n s_name = s_name.rsplit(\" \", 1)[0] if s_name.endswith(\"(V)\") else s_name\n fp.write(\" %s & %.2f & %s & %.2f \\\\\\\\ \\n\" % \\\n (f_name, f_rate, s_name, s_rate))\n fp.write(\"\\\\hline\\n\")\n fp.write(\"\\\\end{tabular}\\n\")\n fp.close()\n\n fp = open(\"supp_meaning_table.tex\", \"w\")\n fp.write(\"\"\"\\\\begin{tabular}{|l|c||l|c||l|c||l|c|}\n \\\\hline\n Meaning & Category & Meaning & Category & Meaning & Category & Meaning & Category\\\\\\\\ \\\\hline\n\n\"\"\")\n feature_names = [f.split(\":\")[-1] for (r,f) in ranked_means]\n feature_names.sort(key=lambda s: s.lower())\n col1 = feature_names[0:25]\n col2 = feature_names[25:50]\n col3 = feature_names[50:75]\n col4 = feature_names[75:]\n for a,b,c,d in zip(col1,col2,col3,col4):\n x,y,z,w = [get_meaning_category(i) or \"Excluded\" for i in (a,b,c,d)]\n # Lop off (V)s (we needed them above for get_meaning_category to work)\n a,b,c,d = [f.rsplit(\" \", 1)[0] if f.endswith(\"(V)\") else f for f in (a,b,c,d)]\n fp.write(\"%s & %s & %s & %s & %s & %s & %s & %s\\\\\\\\ \\n\" % (a, x, b, y, c, z, d, w))\n fp.write(\"\\\\hline\\n\")\n fp.write(\"\\\\end{tabular}\\n\")\n fp.close()",
"def print_tsv(data, filename):\n with open(filename, 'wt') as fout:\n writefile = partial(print, sep='\\t', file=fout)\n writefile('Sample', *expected_header)\n for sample in data:\n for entry in data[sample]:\n writefile(sample, *(entry[field] for field in expected_header))"
] |
[
"0.60229045",
"0.5637049",
"0.5637049",
"0.53095335",
"0.53014034",
"0.52432746",
"0.5227828",
"0.51772594",
"0.51730895",
"0.51567525",
"0.51420665",
"0.51343006",
"0.511792",
"0.511474",
"0.5104135",
"0.5101256",
"0.50933254",
"0.5086502",
"0.5055283",
"0.5043328",
"0.50231004",
"0.50162965",
"0.500538",
"0.500419",
"0.49976316",
"0.49925217",
"0.4971346",
"0.49679187",
"0.49610928",
"0.4948993"
] |
0.5961496
|
1
|
reformat count or FPKM tables into a flattened table of sample_names/values for rapid alignment of the attr table with tracking_id
|
def reformat_countOrFPKMTable(
        self,
        countOrFPKMTable_I=None,
        analysis_id_I=None,
        sna2experimentID_I=None,
        sna2sns_I=None,
        count_or_FPKM='count'):
    # Flatten each wide row into one record per sample column; every record
    # keeps the tracking_id so it can later be aligned with the attribute table.
    countOrFPKMTable_flat = []
    for row in countOrFPKMTable_I:
        for k, v in row.items():
            if k == 'tracking_id':
                continue
            tmp = {}
            tmp['analysis_id'] = analysis_id_I
            tmp['tracking_id'] = row['tracking_id']
            # Column names follow "<sample_name_base>_<replicate>"; the last
            # token is the replicate index used to look up the real sample name.
            sample_name_lst = k.split('_')
            sample_name_base = '_'.join(sample_name_lst[:-1])
            sample_name_rep = eval(sample_name_lst[-1])
            if sna2experimentID_I:
                experiment_id = sna2experimentID_I[sample_name_base]
            else:
                experiment_id = None
            tmp['experiment_id'] = experiment_id
            if sna2sns_I:
                sample_name = sna2sns_I[sample_name_base][sample_name_rep]
            else:
                sample_name = k
            tmp['sample_name'] = sample_name
            tmp['value'] = v
            tmp['value_units'] = count_or_FPKM
            tmp['used_'] = True
            tmp['comment_'] = None
            countOrFPKMTable_flat.append(tmp)
    return countOrFPKMTable_flat
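
For reference, a minimal self-contained sketch of the same flattening idea, using toy data and hypothetical names (no class context, no experiment/sample-name lookup): each non-tracking_id column of a wide count/FPKM row becomes its own record keyed by the tracking_id.

# Hypothetical, standalone illustration of the flattening performed by
# reformat_countOrFPKMTable: one wide row becomes one record per sample column.
def flatten_count_row(row, analysis_id=None, value_units="count"):
    records = []
    for column, value in row.items():
        if column == "tracking_id":
            continue
        # The original method additionally splits the column name
        # ("<sample_name_base>_<replicate>") to look up experiment_id and the
        # real sample name; here the raw column name is kept as the sample name.
        records.append({
            "analysis_id": analysis_id,
            "tracking_id": row["tracking_id"],
            "sample_name": column,
            "value": value,
            "value_units": value_units,
        })
    return records

if __name__ == "__main__":
    wide_row = {"tracking_id": "gene_0001", "ctrl_1": 12, "ctrl_2": 15, "treat_1": 40}
    for rec in flatten_count_row(wide_row, analysis_id="run42"):
        print(rec)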
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def alignAndReformat_countFPKMattrTables(\n self,analysis_id_I=None,sna2experimentID_I=None,\n sna2sns_I=None):\n #reformat\n countTable_flat = self.reformat_countTable(\n analysis_id_I=analysis_id_I,\n sna2experimentID_I=sna2experimentID_I,\n sna2sns_I=sna2sns_I,);\n fpkmTable_flat = self.reformat_fpkmTable(\n analysis_id_I=analysis_id_I,\n sna2experimentID_I=sna2experimentID_I,\n sna2sns_I=sna2sns_I,);\n attrTable_dict = self.reformat_attrTable();\n #align\n countAndFpkmTable_aligned = [];\n for row in countTable_flat[:]:\n row.update(attrTable_dict[row['tracking_id']]);\n countAndFpkmTable_aligned.append(row);\n for row in fpkmTable_flat[:]:\n row.update(attrTable_dict[row['tracking_id']]);\n countAndFpkmTable_aligned.append(row);\n return countAndFpkmTable_aligned;",
"def reformat_countTable(\n self,analysis_id_I=None,sna2experimentID_I=None,\n sna2sns_I=None):\n if self.countTable: countTable = self.countTable[:];\n else: countTable = [];\n\n countTable_flat = self.reformat_countOrFPKMTable(\n countOrFPKMTable_I=countTable,\n analysis_id_I=analysis_id_I,\n sna2experimentID_I=sna2experimentID_I,\n sna2sns_I=sna2sns_I,\n count_or_FPKM = 'count');\n return countTable_flat;",
"def reformat_fpkmTable(\n self,analysis_id_I=None,sna2experimentID_I=None,\n sna2sns_I=None):\n if self.fpkmTable: fpkmTable = self.fpkmTable[:];\n else: fpkmTable = [];\n\n fpkmTable_flat = self.reformat_countOrFPKMTable(\n countOrFPKMTable_I=fpkmTable,\n analysis_id_I=analysis_id_I,\n sna2experimentID_I=sna2experimentID_I,\n sna2sns_I=sna2sns_I,\n count_or_FPKM = 'fpkm');\n return fpkmTable_flat;",
"def reformat_attrTable(\n self):\n #format into a dictionary of rows for quick aligning with the tracking_id\n if self.attrTable: attrTable = self.attrTable[:];\n else: attrTable = [];\n\n attrTable_dict = {};\n for row in attrTable:\n attrTable_dict[row['tracking_id']] = row;\n return attrTable_dict;",
"def freq_table(a):\n Detail_freq = a.loc[:, (a.dtypes == object) | (a.dtypes == long) ].columns.get_values().tolist()\n print(Detail_freq)\n for freq in Detail_freq:\n df1 = pd.DataFrame(a[freq].value_counts(dropna=False).astype(float).map('{:20,.0f}'.format).sort_index()).rename(columns={freq:'Count'})\n df2 = pd.DataFrame(a[freq].value_counts(normalize = True, dropna=False).map('{:,.2%}'.format).sort_index()).rename(columns={freq:'Percentage'})\n df = pd.concat([df1, df2], axis = 1)\n print(df)",
"def _finalize(self, sampled_data):\n final_data = dict()\n for table_name, table_rows in sampled_data.items():\n parents = self.metadata.get_parents(table_name)\n if parents:\n for parent_name in parents:\n foreign_key = self.metadata.get_foreign_key(parent_name, table_name)\n if foreign_key not in table_rows:\n parent_ids = self._find_parent_ids(table_name, parent_name, sampled_data)\n table_rows[foreign_key] = parent_ids\n\n reversed_data = self.metadata.reverse_transform(table_name, table_rows)\n\n fields = self.metadata.get_fields(table_name)\n\n final_data[table_name] = reversed_data[list(fields.keys())]\n\n return final_data",
"def format_unifrac_sample_mapping(sample_ids, otu_ids, otu_table_array):\r\n out = []\r\n for i, row in enumerate(otu_table_array):\r\n for j, val in enumerate(row):\r\n if val > 0:\r\n line = [otu_ids[i], sample_ids[j], str(val)]\r\n out.append('\\t'.join(line))\r\n return out",
"def per_sample_taxa_summaries(open_table, output_format):\n t = parse_biom_table(open_table)\n header = \"#taxon\\trelative_abundance\\n\"\n\n for v, id_, md in t.iter():\n with open(output_format % id_, 'w') as f:\n f.write(header)\n\n for sorted_v, taxa in \\\n sorted(zip(v, t.ids(axis='observation')))[::-1]:\n if sorted_v:\n f.write(\"%s\\t%f\\n\" % (taxa, sorted_v))",
"def reformat_data(self, df, ids):\n data = np.zeros((len(ids), self.n_sample_rows + 1, self.n_features))\n idx = 0\n for i in ids:\n sample = df.loc[i]\n data[idx, 0:89, :] = sample.values\n data[idx, 89, :] = np.mean(sample.values)\n idx += 1\n return data",
"def produce_mirna_single_counts_table(count_files, table_file):\n merge_count_tables(count_files, table_file, \".dedup.single.bam.mirbase_counts.txt\")",
"def make_summary_tables( res ):\n\n # transform second table to csv and read this as a dataFrame\n result_fit_df = pd.read_csv(StringIO( res.tables[1].as_csv() ), sep=\",\",index_col=0)\n result_fit_df.columns = [i.strip() for i in result_fit_df.columns]\n result_fit_df.index = [i.strip() for i in result_fit_df.index]\n\n # first table is trickier because the data is spread on to columns, and there is title line\n L = res.tables[0].as_html().split('\\n')\n L.pop(1) # get rid of the title\n tmp = pd.read_html('\\n'.join(L) , header=None)[0] # read as a dataframe, but with 4 columns \n\n names = list(tmp[0]) + list(tmp[2])[:-2] # columns 0 and 2 are metric names\n values = list(tmp[1]) + list(tmp[3])[:-2] # columns 1 and 3 are the corresponding values\n # NB : I exclude the last 2 elements which are empty \n \n result_general_df = pd.DataFrame( {'Name': names , 'Value' : values}, index = names , columns=['Value'] )\n \n return result_general_df , result_fit_df",
"def normalize_counts_to_tpm(counts_dir, gff_dir, out_dir, feat='CDS', id_sym='gene_id='):\n count_files = [os.path.join(counts_dir, f) for f in os.listdir(counts_dir)]\n all_tpms = {}\n for cf in count_files:\n if \"_counts\" not in cf:\n continue\n tpm = normalize_counts_to_tpm_one_file(cf, gff_dir, feat, id_sym)\n #out_file = \"{}_tpm.csv\".format(os.path.basename(cf))\n #out_path = os.path.join(out_dir, out_file)\n #with open(out_path, \"w\") as fo:\n # for gene, t in tpm.items():\n # fo.write(\"{},{}\\n\".format(gene, t))\n prefix = os.path.basename(cf).split(\"_trimmed\")[0] # this would be specific to my naming convention\n all_tpms[prefix] = tpm\n return all_tpms",
"def _create_counts(out_dts, out_dir):\n ma, ma_mirna = _merge(out_dts)\n out_ma = op.join(out_dir, \"counts.tsv\")\n out_ma_mirna = op.join(out_dir, \"counts_mirna.tsv\")\n ma.to_csv(out_ma, sep=\"\\t\")\n ma_mirna.to_csv(out_ma_mirna, sep=\"\\t\")\n return out_ma_mirna, out_ma",
"def print_tables(hash_table, f_output, l_samples):\n\n l_fields = ['chr', 'pos', 'ref', 'alt', 'QUAL', 'FILTER',\n 'Func.refGene', 'Gene.refGene', 'GeneDetail.refGene', 'ExonicFunc.refGene', 'AAChange.refGene',\n 'cytoBand', 'ExAC_ALL', 'ExAC_AFR', 'ExAC_AMR', 'ExAC_EAS', 'ExAC_FIN', 'ExAC_NFE', 'ExAC_OTH',\n 'ExAC_SAS',\n 'avsnp147', 'SIFT_score', 'SIFT_pred', 'Polyphen2_HDIV_score', 'Polyphen2_HDIV_pred',\n 'Polyphen2_HVAR_score',\n 'Polyphen2_HVAR_pred', 'LRT_score', 'LRT_pred', 'MutationTaster_score', 'MutationTaster_pred',\n 'MutationAssessor_score', 'MutationAssessor_pred', 'FATHMM_score', 'FATHMM_pred', 'PROVEAN_score',\n 'PROVEAN_pred', 'VEST3_score', 'CADD_raw', 'CADD_phred', 'DANN_score', 'fathmm-MKL_coding_score',\n 'fathmm-MKL_coding_pred', 'MetaSVM_score', 'MetaSVM_pred', 'MetaLR_score', 'MetaLR_pred',\n 'integrated_fitCons_score', 'integrated_confidence_value', 'GERP++_RS', 'phyloP7way_vertebrate',\n 'phyloP20way_mammalian', 'phastCons7way_vertebrate', 'phastCons20way_mammalian', 'SiPhy_29way_logOdds']\n l_fields = l_fields + l_samples\n \n l_chr = set([item[0] for item in hash_table.keys()])\n\n fo = open(f_output, 'w')\n fo.write(','.join(l_fields) + '\\n')\n for key in sorted(hash_table.keys(), key=itemgetter(1)):\n fo.write(','.join(map(lambda field: hash_table[key].get(field, '.'), l_fields)) + '\\n')\n fo.close()",
"def get_split_summary_table(all_df, train_df, test_df):\n table = PrettyTable()\n table.field_names = ['set', 'N total', 'N non-ICH', 'N ICH', 'frac non-ICH', 'frac ICH']\n for df, name in zip([all_df, train_df, test_df],['All', 'Train', 'Test']):\n table.add_row([name, len(df), len(df[df.Hemorrhage == 0]), len(df[df.Hemorrhage == 1]),\n f'{len(df[df.Hemorrhage == 0])/len(df):.3%}', f'{len(df[df.Hemorrhage == 1])/len(df):.3%}'])\n return table",
"def prepare_batch_sample_set_for_metadata_export(path, tsca_id):\n raw = pd.read_table(path)\n print( \"%d Samples in this batch\" % raw.shape[0] )\n\n # Create dfs to upload\n all_samples = pd.concat([pd.DataFrame(index=raw.index, columns=['membership:sample_set_id'], data=tsca_id), \\\n raw[ ['sample_id', 'sample_type'] ]], axis=1)\n\n\n tumors = all_samples.loc[ all_samples['sample_type'] == \"Tumor\", ['membership:sample_set_id', 'sample_id'] ]\n tumors.loc[: , 'membership:sample_set_id'] = \"%s_T\"%tsca_id\n \n normals = all_samples.loc[ all_samples['sample_type'] == \"Normal\", ['membership:sample_set_id', 'sample_id'] ]\n normals.loc[: , 'membership:sample_set_id'] = \"%s_N\"%tsca_id\n\n all_samples = all_samples.drop('sample_type', axis=1)\n return (all_samples, tumors, normals)",
"def produce_mirna_allbest_counts_table(count_files, table_file):\n merge_count_tables(count_files, table_file, \".dedup.bam.mirbase_counts.txt\")",
"def write_count_table(filtered_counts, gene_names, ids_liver_header):\n with open(\"filtered_tcga_counts.tsv\", \"w\") as file:\n file.write(\"gene_id\\tgene_name\\t\" + \"\\t\".join(ids_liver_header) + \"\\n\")\n for gene_name, counts_line in zip(gene_names, filtered_counts):\n file.write(gene_name + \"\\t\" + \"\\t\" + \"\\t\".join(counts_line) + \"\\n\")",
"def make_lof_table(data_table, my_genes, my_samples, summary_func):\n table_header = [\"Gene\"] + my_samples + [\n \"Missense:Benign\", \"Missense:Possibly\", \"Missense:Probably\",\n \"MissenseNA\", \"Indel\", \"Nonsense\", \"Frameshift\", \"Splice-site\",\n \"Synonymous\"]\n table_records = []\n\n gs_lookup = group_data_by_gs(data_table)\n for gene in my_genes:\n synonymous = missense_benign = missense_possibly = missense_probably = \\\n missense_na = frameshift = nonsense = splice = indel = 0\n\n out_row = [gene]\n for sample in my_samples:\n normalized = [0]\n # Count mutations of each type for this gene and sample\n for entry in gs_lookup[gene][sample]:\n if entry['muttype'] == 'Silent':\n synonymous += 1\n continue\n if entry['muttype'] == 'Intron':\n # Shouldn't be here; ignore\n continue\n\n if entry['muttype'] == 'Missense_Mutation':\n if entry['consequence'] == 'benign':\n missense_benign += 1\n elif entry['consequence'] == 'possibly':\n missense_possibly += 1\n elif entry['consequence'] == 'probably':\n missense_probably += 1\n elif entry['consequence'] == 'NA':\n missense_na += 1\n else:\n print(\"Unhandled missense consequence level:\",\n entry['consequence'], file=sys.stderr)\n elif entry['muttype'] == 'Nonsense_Mutation':\n nonsense += 1\n elif entry['muttype'] == 'Splice_Site':\n splice += 1\n elif entry['muttype'] in ('Frame_Shift_Ins', 'Frame_Shift_Del'):\n frameshift += 1\n elif entry['muttype'] in ('In_Frame_Ins', 'In_Frame_Del'):\n indel += 1\n else:\n print(\"Unhandled mutation type:\", entry['muttype'],\n file=sys.stderr)\n continue\n\n normalized.append(entry['normalized'])\n # Summarize the normalized mutation counts for this gene and sample\n out_row.append(summary_func(normalized))\n out_row.extend((missense_benign, missense_possibly, missense_probably,\n missense_na, indel, nonsense, frameshift, splice,\n synonymous))\n table_records.append(out_row)\n\n return pandas.DataFrame.from_records(table_records, columns=table_header)",
"def join_gene_tables(gene_tables,output,verbose=None):\n \n gene_table_data={}\n start_column_id=\"\"\n samples=[]\n file_basenames=[]\n index=0\n for gene_table in gene_tables:\n \n if verbose:\n print(\"Reading file: \" + gene_table)\n \n lines=util.process_gene_table_with_header(gene_table, allow_for_missing_header=True)\n header=next(lines)\n \n # get the basename of the file\n file_basename='.'.join(os.path.basename(gene_table).split('.')[:-1])\n file_basenames.append(file_basename)\n \n if header:\n header_info=header.split(GENE_TABLE_DELIMITER)\n if not start_column_id:\n start_column_id=header_info[0]\n # allow for multiple samples\n sample_names=header_info[1:]\n else:\n # if there is no header in the file then use the file name as the sample name\n sample_names=[file_basename]\n \n for line in lines:\n data=line.split(GENE_TABLE_DELIMITER)\n try:\n gene=data[0]\n # if the header names multiple samples, merge all samples\n # this prevents extra columns from being included in some rows\n # this requires files containing multiple samples to include a header\n data_points=data[1:len(sample_names)+1]\n except IndexError:\n gene=\"\"\n\n if gene:\n current_data=gene_table_data.get(gene,\"\")\n fill = index - current_data.count(GENE_TABLE_DELIMITER)\n if fill > 0:\n # fill in zeros for samples without data then add data point\n gene_table_data[gene]=current_data + GENE_TABLE_DELIMITER.join([\"0\"]*fill) + GENE_TABLE_DELIMITER + GENE_TABLE_DELIMITER.join(data_points) + GENE_TABLE_DELIMITER\n elif fill < 0:\n # add data point to other data point from the same sample\n current_data_points=current_data.split(GENE_TABLE_DELIMITER)\n for i,point in enumerate(data_points):\n store_index=len(data_points)*-1-1+i\n current_data_points[store_index]=str(float(current_data_points[store_index])+float(point))\n gene_table_data[gene] = GENE_TABLE_DELIMITER.join(current_data_points)\n else:\n # add data point to end of list\n gene_table_data[gene] = current_data + GENE_TABLE_DELIMITER.join(data_points) + GENE_TABLE_DELIMITER\n \n samples+=sample_names\n index+=len(sample_names)\n # if all of the header names for the files are the same\n # then use the file names as headers\n if samples.count(samples[0]) == len(samples):\n samples=file_basenames\n \n # write the joined gene table\n if not start_column_id:\n start_column_id=\"# header \"\n sample_header=[start_column_id]+samples\n total_gene_tables=len(samples)\n sorted_gene_list=util.fsort(list(gene_table_data))\n try:\n file_handle=open(output,\"w\")\n file_handle.write(GENE_TABLE_DELIMITER.join(sample_header)+\"\\n\")\n except EnvironmentError:\n sys.exit(\"Unable to write file: \" + output) \n \n for gene in sorted_gene_list:\n # extend gene data for any gene that is not included in all samples\n current_data=gene_table_data[gene]\n fill = total_gene_tables - current_data.count(GENE_TABLE_DELIMITER)\n if fill:\n current_data=current_data + GENE_TABLE_DELIMITER.join([\"0\"]*fill) + GENE_TABLE_DELIMITER\n file_handle.write(gene+GENE_TABLE_DELIMITER+current_data.rstrip(GENE_TABLE_DELIMITER)+\"\\n\")\n \n file_handle.close()",
"def samples(app, args):\n engine = create_engine(args.datafile)\n meta = MetaData()\n meta.reflect(engine)\n print(\"\\t\".join([str(x).replace('counts.', '')\n for x in meta.tables['counts'].columns\n if not x == 'counts.index']))",
"def format_df(df):\n df = df.transpose()\n df.rename(columns=df.iloc[0], inplace=True)\n df.drop('gene', inplace=True)\n df.index.rename('sample_id', inplace=True)\n return df",
"def format_df(df):\n df = df.transpose()\n df.rename(columns=df.iloc[0], inplace=True)\n df.drop('gene', inplace=True)\n df.index.rename('sample_id', inplace=True)\n return df",
"def tranform_data(args):\n data_dir = args.data_dir\n save_dir = args.save_dir\n os.makedirs(save_dir)\n maxsize = args.maxsize\n maxlen = args.maxlen \n ext = args.ext\n datetime = args.datetime\n classes = set()\n nb_files = 0\n time_col = 'time'\n event_col = 'event'\n gt_ids = None\n if args.ext == \"pkl\":\n with open(Path(args.data_dir, \"fx_labels\"), \"rb\") as fp:\n gt_ids = pickle.load(fp)[:maxsize]\n labels = np.unique(gt_ids)\n gt_data = []\n for i in range (len(gt_ids)):\n gt_data.append(int(np.nonzero(gt_ids[i] == labels)[0]))\n gt = {'cluster_id': gt_data}\n print(gt_data)\n gt_table = pd.DataFrame(data=gt)\n gt_table.to_csv(Path(save_dir, 'clusters.csv'))\n if Path(args.data_dir, 'clusters.csv').exists():\n gt_ids = pd.read_csv(Path(args.data_dir, 'clusters.csv'))[:(maxsize)]\n gt_ids.to_csv(Path(save_dir, 'clusters.csv'))",
"def preprocess(df):\n \n # drop the following columns - irrelevant now\n DROP_COLUMNS = ['id', 'original_title', 'release_date'\n , 'tmdbId', 'popularity', 'year']\n df.drop(DROP_COLUMNS, axis=1, inplace=True)\n \n # drop all of the language columns\n DROP_COLUMNS = [col for col in df.columns if col[:3]==\"lan\"]\n df.drop(DROP_COLUMNS, axis=1, inplace=True)\n\n # loop through the columns we want to aggregate\n for col_type in [\n \"original_language_\"\n , \"prod_comp_cntry_\"\n , \"prod_comp_names_\"\n , \"writers_\"\n , \"actors_\"\n , \"genres_\"\n , \"director_\"\n ]:\n # create a dictionary of each unique value and its frequency\n val_freq = {}\n for col in df.columns:\n if col.startswith(col_type):\n val_freq[col] = df[col].sum()\n\n # create a dataframe from this dictionary; sort by count\n counts = pd.DataFrame.from_dict(\n val_freq\n , orient='index'\n , columns=['count']\n ).sort_values('count', ascending=False)\n counts['frac'] = counts['count'].apply(lambda x: 100*x / df.shape[0])\n\n # handle special case of production company country\n if col_type == \"prod_comp_cntry_\":\n DROP_COLUMNS = [col for col in counts.index][3:]\n\n # handle special case of directors\n elif col_type == \"director_\":\n DIRECTOR_COLS = [col for col in df.columns\n if col.startswith(\"director_\")\n and col!=\"director_pop\"]\n df['established_director'] = df[DIRECTOR_COLS].max(axis=1)\n DROP_COLUMNS = DIRECTOR_COLS\n\n # handle special case of actors\n elif col_type == \"actors_\":\n ACTORS_COLS = [col for col in df.columns if \"actors\" in col]\n df['num_top_100_actors'] = df[ACTORS_COLS].sum(axis=1)\n DROP_COLUMNS = ACTORS_COLS\n\n # handle all the other cases\n else:\n DROP_COLUMNS = [col for col in counts.query('frac < 2').index]\n\n\n df.drop(DROP_COLUMNS, axis=1, inplace=True)\n \n ##########################################################################\n # adjust the data for inflation\n CPI_tf = df['CPIAUCSL'].max()\n df['budget'] = df[['budget', 'CPIAUCSL']].apply(\n cpi_adjust\n , args=(CPI_tf ,)\n , axis=1\n )\n df['revenue'] = df[['revenue', 'CPIAUCSL']].apply(\n cpi_adjust\n , args=(CPI_tf ,)\n , axis=1\n )\n # no longer need CPI data\n df.drop('CPIAUCSL', axis=1, inplace=True)\n \n ########################################################################## \n # add in useful features about the cast and crew \n df['cast_crew_sum_pop'] = (\n df['director_pop']\n + df['avg_actor_pop']\n + df['avg_writer_pop']\n )\n df['cast_crew_product_pop'] = (\n df['director_pop']\n * df['avg_actor_pop']\n * df['avg_writer_pop']\n )\n df['runtime'].replace(to_replace=0, value=df['runtime'].median(), inplace=True)\n df = df.query('10000 <= revenue').copy()\n df = df.query('100000 <= budget').copy()\n df.drop('sum_actor_pop', axis=1, inplace=True)\n df.drop('min_writer_pop', axis=1, inplace=True)\n\n # code to transform columns\n for col in [\n \"budget\", \"director_pop\", \"avg_writer_pop\"\n , \"max_writer_pop\", \"avg_actor_pop\", \"max_actor_pop\"\n , \"min_actor_pop\", 'cast_crew_sum_pop'\n , 'cast_crew_product_pop'\n ]:\n df['log10_'+col] = df[col].apply(lambda x: math.log10(x))\n df.drop(col, axis=1, inplace=True)\n \n return df",
"def gen_tab(cat):\n\n col = ['FLUX_APER2','FLUX_APER4','FLUX_APER5','FLUX_APER8','FLUX_APER10','FLUX_APER14',\n 'MAG_APER2','MAG_APER4','MAG_APER5','MAG_APER8','MAG_APER10','MAG_APER14',\n 'MAG_AUTO','MAG_PETRO','KRON_RADIUS',\n 'PETRO_RADIUS','FLUX_MAX','ISOAREAF_IMAGE','x',\n 'y','ra','dec','X2_IMAGE','Y2_IMAGE','XY_IMAGE',\n 'THETA_IMAGE','X2WIN_IMAGE','Y2WIN_IMAGE','XYWIN_IMAGE','AWIN_IMAGE','BWIN_IMAGE',\n 'THETAWIN_IMAGE','AWIN_WORLD','BWIN_WORLD','THETAWIN_WORLD',\n 'MU_MAX','FLAGS','FWHM_IMAGE','ELONGATION','SEX_CLASS','FLUX_RADIUS25',\n 'FLUX_RADIUS50','FLUX_RADIUS85','FLUX_RADIUS95','FLUX_RADIUS99']\n print('generating features table: {}'.format(cat))\n tab = pd.read_table(cat,skiprows=41,sep=r'\\s+',header=None, names=col)\n\n # crop the image for just using the central part of the image\n tab = crop(tab)\n\n # add concentration column by subtracting mag10 by mag5, rejecting the detections with negative concentration\n tab['CONCENT'] = tab.MAG_APER5 - tab.MAG_APER10\n tab = tab[tab.CONCENT > 0]\n\n # normalizing the columns\n print('normalizing features...')\n seesq_norm = ['X2_IMAGE','Y2_IMAGE','X2WIN_IMAGE',\n 'Y2WIN_IMAGE','XY_IMAGE','XYWIN_IMAGE',\n 'ISOAREAF_IMAGE']\n see_norm = ['AWIN_WORLD','AWIN_WORLD','FWHM_IMAGE',\n 'KRON_RADIUS','PETRO_RADIUS','FLUX_RADIUS25',\n 'FLUX_RADIUS50','FLUX_RADIUS85',\n 'FLUX_RADIUS95','FLUX_RADIUS99']\n mag_norm = ['MAG_APER4','MAG_APER5','MAG_APER8',\n 'MAG_APER10','MAG_APER14','MAG_AUTO',\n 'MAG_PETRO','MU_MAX','CONCENT']\n flux_norm = ['FLUX_APER2','FLUX_APER4','FLUX_APER5',\n 'FLUX_APER8','FLUX_APER10','FLUX_APER14']\n fwhm_mean = tab.FWHM_IMAGE.mean()\n for seesq_col in seesq_norm:\n tab[seesq_col] = tab[seesq_col] / (fwhm_mean**2)\n for see_col in see_norm:\n tab[see_col] = tab[see_col] / fwhm_mean\n for mag_col in mag_norm:\n tab[mag_col] = tab[mag_col] * tab['MAG_APER2']\n for flux_col in flux_norm:\n tab[flux_col] = tab[flux_col] * tab['FLUX_MAX']\n tab['CONCENT'] = -1 * tab['CONCENT']\n\n # add column for galactic latitude\n print('calculating galactic latitude...')\n ra = np.array(tab['ra'].values)\n dec = np.array(tab['dec'].values)\n pos = SkyCoord(ra=ra*u.degree, dec=dec*u.degree, frame='icrs')\n tab['b'] = list(pos.galactic.b.deg)\n\n tab.drop(['MAG_APER2','FLUX_MAX','x','y'], axis=1, inplace=True)\n tab.to_csv(cat[:-4]+'.csv', index=False, header=True)",
"def build_table():\n with contextlib.ExitStack() as stack:\n files = [stack.enter_context(gzip.open(f, 'rt')) for f in sys.argv[1:]]\n iters = [(line.split() for line in f) for f in files]\n for it in iters:\n next(it)\n key = operator.itemgetter(0)\n table = []\n for k, g in itertools.groupby(merge(*iters, key=key), key=key):\n props = list(g)\n if len(props) == len(iters):\n table.append([k] + [x[1] for x in props])\n for snp in table:\n print(*snp)",
"def print_table(source, count=False):\n table_value = []\n table_header = []\n for source_key, source_value in source.items():\n for item in source_value:\n table_value.append([v for v in item.values()])\n table_header.append([k for k in item.keys()])\n if not count:\n print(tabulate(table_value,\n headers=table_header[0],\n tablefmt='orgtbl'))\n else:\n print(tabulate([[len(source_value)]],\n headers=[source_key],\n tablefmt='orgtbl'))",
"def make_table(ranked_means):\n fp = open(\"table.tex\", \"w\")\n fp.write(\"\"\"\\\\begin{tabular}{|l|c||l|c|}\n \\\\hline\n \\\\multicolumn{2}{|c||}{Slowest} & \\\\multicolumn{2}{|c|}{Fastest} \\\\\\\\ \\\\hline\n Feature & Rate & Feature & Rate \\\\\\\\ \\\\hline\n\"\"\")\n top_10 = ranked_means[0:10]\n bottom_10 = ranked_means[-10:]\n for ((f_rate, f_name),(s_rate,s_name)) in zip(top_10, bottom_10):\n f_name = f_name.split(\":\")[-1]\n f_name = f_name.rsplit(\" \", 1)[0] if f_name.endswith(\"(V)\") else f_name\n s_name = s_name.split(\":\")[-1]\n s_name = s_name.rsplit(\" \", 1)[0] if s_name.endswith(\"(V)\") else s_name\n fp.write(\" %s & %.2f & %s & %.2f \\\\\\\\ \\n\" % \\\n (f_name, f_rate, s_name, s_rate))\n fp.write(\"\\\\hline\\n\")\n fp.write(\"\\\\end{tabular}\\n\")\n fp.close()\n\n fp = open(\"supp_meaning_table.tex\", \"w\")\n fp.write(\"\"\"\\\\begin{tabular}{|l|c||l|c||l|c||l|c|}\n \\\\hline\n Meaning & Category & Meaning & Category & Meaning & Category & Meaning & Category\\\\\\\\ \\\\hline\n\n\"\"\")\n feature_names = [f.split(\":\")[-1] for (r,f) in ranked_means]\n feature_names.sort(key=lambda s: s.lower())\n col1 = feature_names[0:25]\n col2 = feature_names[25:50]\n col3 = feature_names[50:75]\n col4 = feature_names[75:]\n for a,b,c,d in zip(col1,col2,col3,col4):\n x,y,z,w = [get_meaning_category(i) or \"Excluded\" for i in (a,b,c,d)]\n # Lop off (V)s (we needed them above for get_meaning_category to work)\n a,b,c,d = [f.rsplit(\" \", 1)[0] if f.endswith(\"(V)\") else f for f in (a,b,c,d)]\n fp.write(\"%s & %s & %s & %s & %s & %s & %s & %s\\\\\\\\ \\n\" % (a, x, b, y, c, z, d, w))\n fp.write(\"\\\\hline\\n\")\n fp.write(\"\\\\end{tabular}\\n\")\n fp.close()",
"def _transform_idoc(df):\n global _SIMPLECOUNT_COLUMNS\n\n try:\n df['comcnty'] = ((df['comcnty'] + 1) / 2).astype(int)\n df.columns = ['year', 'fk_simplecount_county'] + df.columns.tolist()[2:]\n\n indicator_list = [1600, 1601, 1602, 1603, 1604, 1605, 1606, 1607, 1620, 1621]\n \n c_nc = df['admtypo3'] == 1\n c_tv = df['admtypo3'] == 2\n c_pers = df['offtype2'] == 1 # df['offtype'] == 1\n c_prop = df['offtype2'] == 2 # df['offtype'] == 2\n c_sex = df['offtype2'] == 4 # df['offtype'] == 4\n c_drug = df['offtype2'].isin([3.1, 3.2, 3.3, 3.4, 3.5, 3.6]) # df['offtype'] == 3\n c_other = df['offtype2'].isin([0, 3, 5, 7]) # df['offtype'] == 7\n c_viol = df['offtype'] == 1\n c_male = df['sex'] == 'M'\n c_female = ~c_male\n\n c_first2 = [c_nc, c_tv]\n c_others = [c_pers, c_prop, c_sex, c_drug, c_other, c_viol, c_male, c_female]\n \n def helper(c, indicator_id, first2):\n df['fk_simplecount_indicator'] = indicator_id\n g = ['fk_simplecount_indicator', 'year', 'fk_simplecount_county']\n if first2:\n return df[c].groupby(g).size().reset_index(name='value')\n else:\n return df[c_nc & c].groupby(g).size().reset_index(name='value')\n\n out = pd.DataFrame()\n for i in range(2):\n out = out.append(helper(c_first2[i], indicator_list[i], first2=True))\n \n for i in range(len(c_others)):\n out = out.append(helper(c_others[i], indicator_list[i+2], first2=False))\n\n out = out.loc[out['fk_simplecount_county'].isin(range(1,102+1))]\n return out[_SIMPLECOUNT_COLUMNS]\n except:\n raise"
] |
[
"0.5996871",
"0.5834174",
"0.56089836",
"0.5596423",
"0.5548671",
"0.554599",
"0.5534935",
"0.548897",
"0.5448419",
"0.5394029",
"0.537485",
"0.5362557",
"0.5339601",
"0.53235745",
"0.52827585",
"0.5272961",
"0.5233933",
"0.522556",
"0.52244353",
"0.5220203",
"0.5207982",
"0.5201744",
"0.5201744",
"0.5178545",
"0.51763254",
"0.5175003",
"0.5160906",
"0.51356107",
"0.5124829",
"0.51237833"
] |
0.63312393
|
0
|
View for rendering hours as json.
|
def json_hours(request):
current_site = Site.find_for_request(request)
if request.method == 'GET':
if request.GET.get('fallback'):
fallback = request.GET['fallback']
return JsonResponse(
{
'llid': get_default_unit().location.libcal_library_id,
}
)
else:
libcalid = request.GET['libcalid']
all_building_hours = json.dumps(get_building_hours_and_lid(current_site))
return JsonResponse(
{
'all_building_hours': all_building_hours,
'current_hours': get_json_hours_by_id(int(libcalid), all_building_hours),
'llid': libcalid,
'llid_fallback': get_default_unit().location.libcal_library_id,
}
)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def display_hours(employee_id):\n\n if not g.user:\n flash(\"Please Login to continue.\", \"danger\")\n return redirect(\"/\")\n \n employee = Employee.query.get_or_404(employee_id)\n\n labels = json.dumps( [\"Completed\", \"Required\"])\n data = json.dumps([employee.completed, employee.required])\n \n return render_template(\"users/display_hours.html\", employee = employee, labels = labels, data = data)",
"def _draw_hours(self):\n tmp_str_list = []\n for i in range(0, self._g_width, self._min_grid):\n if i % self._hour_grid == 0:\n tmp_str_list.append('<polyline class=\"FullHour\" points=\"%d,%d, %d,%d\" />' % (\n i + .5 + 20, 20, i + .5 + 20, self._g_height))\n tmp_str_list.append('<text class=\"Label\" x=\"%d\" y=\"%d\">%d</text>' % (\n i + 20, 20, (i / self._hour_grid + self._offset) % 24))\n else:\n tmp_str_list.append('<polyline class=\"SubHour\" points=\"%d,%d,%d,%d\" />' % (\n i + .5 + 20, 20, i + .5 + 20, self._g_height))\n return \"\".join(tmp_str_list)",
"def get(self, request):\n\t\tworkingHours = GymModels.WorkingHours.objects.all()\n\t\tserializer = PeopleSerializer.WorkingHourSerializer(workingHours, many=True)\n\t\treturn Response(serializer.data)",
"def hours(self, venue_id):\n response = self._request(V2_ENDPOINTS['HOURS'] + venue_id)\n return response",
"def hours(input=None):\n return get(input).hours",
"def hours(self):\n return self.config['hours']",
"def json_frapp(request):\n from pv.settings import MEDIA_URL\n\n if request.GET.get('date') == None:\n start = datetime.combine(date.today(), time(0, 0))\n else:\n start = datetime.combine( datetime.strptime(request.GET.get('date'), '%Y-%m-%d').date(), time(0, 0))\n\n end = datetime.combine(start, time(23, 59))\n\n timeslots = TimeSlot.objects.filter(start__gte=start,start__lte=end).select_related('show').order_by('start')\n\n\n '''Generate categories object for output'''\n\n categories = Category.objects.all()\n categories_output = []\n\n for c in categories:\n c_entry = {\n 'id': c.id,\n 'color': c.color.replace('#', '').upper(),\n 'namedisplay': c.category,\n 'description': c.description\n }\n\n categories_output.append(c_entry)\n\n # Get all series for timeslots\n series = set()\n for ts in timeslots:\n series.add(ts.show)\n\n\n '''Generate series object for output'''\n\n series_output = []\n\n for s in series:\n metainfos = []\n metainfos.append({ 'key': 'ProduzentIn', 'value': ', '.join(ts.show.hosts.values_list('name', flat=True)) })\n metainfos.append({ 'key': 'E-Mail', 'value': ', '.join(ts.show.hosts.values_list('email', flat=True)) })\n\n image = '' if s.image.name == None or s.image.name == '' else str(get_current_site(request)) + MEDIA_URL + s.image.name\n url = '' if s.website == None or s.website == '' else s.website\n\n # Get active schedules for the given date\n # But include upcoming single timeslots (with rrule_id=1)\n schedules = Schedule.objects.filter( Q(show=s.id,is_repetition=False) &\n (\n Q(rrule_id__gt=1,dstart__lte=start,until__gte=start) |\n Q(rrule_id=1,dstart__gte=start)\n )\n )\n\n schedules_repetition = Schedule.objects.filter( Q(show=s.id,is_repetition=True) &\n (\n Q(rrule_id__gt=1,dstart__lte=start,until__gte=start) |\n Q(rrule_id=1,dstart__gte=start)\n )\n )\n\n broadcastinfos = ''\n\n if not schedules.exists():\n continue\n\n for schedule in schedules:\n broadcastinfos = broadcastinfos + generate_frapp_broadcastinfos(schedule)\n\n if schedules_repetition.exists():\n broadcastinfos = broadcastinfos + 'Wiederholung jeweils:'\n for schedule in schedules_repetition:\n broadcastinfos = broadcastinfos + generate_frapp_broadcastinfos(schedule)\n\n s_entry = {\n 'id': s.id,\n 'categoryid': s.category.values_list('id', flat=True)[0],\n 'color': s.category.values_list('color', flat=True)[0].replace('#', '').upper(),\n 'namedisplay': s.name,\n 'description': s.description,\n 'url': url,\n 'image': image,\n 'broadcastinfos': broadcastinfos,\n 'metainfos': metainfos\n }\n\n series_output.append(s_entry)\n\n\n '''Generate shows object for output'''\n\n shows_output = []\n\n for ts in timeslots:\n\n is_repetition = ' ' + _('REP') if ts.schedule.is_repetition is 1 else ''\n namedisplay = ts.show.name + is_repetition\n description = ts.show.description\n url = str(get_current_site(request)) + '/shows/' + ts.show.slug\n urlmp3 = ''\n\n # If there's a note to the timeslot use its title, description and url\n try:\n note = Note.objects.get(timeslot=ts.id)\n namedisplay = note.title + is_repetition\n description = note.content\n url = str(get_current_site(request)) + '/notes/' + note.slug\n urlmp3 = note.audio_url\n except ObjectDoesNotExist:\n pass\n\n ts_entry = {\n 'id': ts.id,\n 'seriesid': ts.show.id,\n 'datetimestart': ts.start.strftime('%d.%m.%Y %H:%M:%S'),\n 'datetimeend': ts.end.strftime('%d.%m.%Y %H:%M:%S'),\n 'namedisplay': namedisplay,\n 'description': description,\n 'url': url,\n 'urlmp3': urlmp3,\n }\n\n shows_output.append(ts_entry)\n\n output = {}\n 
output['categories'] = categories_output\n output['series'] = series_output\n output['shows'] = shows_output\n\n return HttpResponse(json.dumps(output, ensure_ascii=False).encode('utf8'),\n content_type=\"application/json; charset=utf-8\")",
"def format_hours(self, data):\n return unicode('%f' % data).rstrip('0').rstrip('.')",
"def hourly(self):\n return c.Hourly(self)",
"def output(self):\n return {\n \"id\": self.id,\n \"name\": self.name,\n \"type\": 2,\n \"enabled\": 1,\n \"days_of_week\": self.days,\n \"time\": self.time.output()\n }",
"def open_hours_detail(self):\n return self._open_hours_detail",
"def get_hourly_weather_details(self, hours: int = None):\n if hours is None:\n hours = 11\n forecast = super().get_weather_forecast(self.BASE_URL)\n headers = [\"date_time\",\n \"temp\",\n \"real_feel_temp\",\n \"wind_speed\",\n \"rain_probability\",\n \"cloud_cover\",\n ]\n for number in range(hours):\n data = []\n date_time = forecast[number]['DateTime']\n date_time = date_time[:16]\n date_time = date_time.replace('T', ' ')\n data.append(date_time)\n temp = round((int(\n forecast[number][\"Temperature\"][\"Value\"]) - 32) / 1.8)\n data.append(temp)\n real_feel_temp = round((int(\n forecast[number][\"RealFeelTemperature\"][\"Value\"]) - 32) / 1.8)\n data.append(real_feel_temp)\n wind_speed = forecast[number][\"Wind\"][\"Speed\"][\"Value\"]\n data.append(wind_speed)\n rain_probability = forecast[number][\"RainProbability\"]\n data.append(rain_probability)\n cloud_cover = forecast[number][\"CloudCover\"]\n data.append(cloud_cover)\n yield dict(zip(headers, data))",
"def get_24h(self):\n records = self.level_model.get_for_period(1)\n self.set_attributes(records, '24 hours')",
"def output(self):\n \n if self.days > 0:\n interval = \"%dd\" % self.days\n if self.hours > 0:\n interval = \"%dh\" % self.hours\n if self.minutes > 0:\n interval = \"%dm\" % self.minutes\n else:\n interval = \"%ds\" % self.seconds\n \n return {\n \"id\": self.id,\n \"name\": self.name,\n \"type\": 1,\n \"enabled\": 1,\n \"interval\": interval\n }",
"def output(self):\n time = \"%04d-%02d-%02d %02d:%02d:%02d\" % (self.year, self.month, \\\n self.date, self.hours, self.minutes, self.seconds)\n return {\n \"id\": self.id,\n \"name\": self.name,\n \"type\": 4,\n \"enabled\": 1,\n \"abstime\": time\n }",
"def timesheet(request):\r\n return render(\r\n request,\r\n 'timesheet/timesheet.html'\r\n )",
"def get_hourly(self):\n pass",
"def output(self):\n return {\n \"id\": self.id,\n \"name\": self.name,\n \"type\": 3,\n \"enabled\": 1,\n \"days_of_month\": self.days,\n \"time\": self.time.output()\n }",
"def hourly_data(self):\n return self._hourly_data",
"def stats_format_hours(app_id, hours, hours_anon, hours_auth,\r\n max_hours, max_hours_anon, max_hours_auth):\r\n hourNewStats = dict(label=\"Anon + Auth\", disabled=\"True\", values=[], max=0)\r\n hourNewAnonStats = dict(label=\"Anonymous\", values=[], max=0)\r\n hourNewAuthStats = dict(label=\"Authenticated\", values=[], max=0)\r\n\r\n hourNewStats['max'] = max_hours\r\n hourNewAnonStats['max'] = max_hours_anon\r\n hourNewAuthStats['max'] = max_hours_auth\r\n\r\n for h in sorted(hours.keys()):\r\n # New answers per hour\r\n #hourNewStats['values'].append(dict(x=int(h), y=hours[h], size=hours[h]*10))\r\n if (hours[h] != 0):\r\n hourNewStats['values'].append([int(h), hours[h],\r\n (hours[h] * 5) / max_hours])\r\n else:\r\n hourNewStats['values'].append([int(h), hours[h], 0])\r\n\r\n # New Anonymous answers per hour\r\n if h in hours_anon.keys():\r\n #hourNewAnonStats['values'].append(dict(x=int(h), y=hours[h], size=hours_anon[h]*10))\r\n if (hours_anon[h] != 0):\r\n hourNewAnonStats['values'].append([int(h), hours_anon[h],\r\n (hours_anon[h] * 5) / max_hours])\r\n else:\r\n hourNewAnonStats['values'].append([int(h), hours_anon[h], 0])\r\n\r\n # New Authenticated answers per hour\r\n if h in hours_auth.keys():\r\n #hourNewAuthStats['values'].append(dict(x=int(h), y=hours[h], size=hours_auth[h]*10))\r\n if (hours_auth[h] != 0):\r\n hourNewAuthStats['values'].append([int(h), hours_auth[h],\r\n (hours_auth[h] * 5) / max_hours])\r\n else:\r\n hourNewAuthStats['values'].append([int(h), hours_auth[h], 0])\r\n return hourNewStats, hourNewAnonStats, hourNewAuthStats",
"def json(self):\n return {\n 'uri': self.view_uri,\n 'created': time.strftime('%c', time.gmtime(self.created)),\n 'created_timestamp': self.created,\n 'exception_type': str(self.exc_type),\n 'exception': str(self.exc_value),\n }",
"def to_json(self):\n new_dict = self.__dict__.copy()\n new_dict.update({'__class__': str(self.__class__.__name__)})\n for key, value in new_dict.items():\n if isinstance(value, datetime):\n new_dict[key] = value.strftime(self.timeformat)\n return new_dict",
"def getJSON(self):\n text = super().getJSON() + f', \"exchange\": \"{self.__exchange}\"'\n text += f', \"market pair\": \"{self.__market_pairs}\"'\n text += f', \"interval\": \"{self.__interval}\"}}'\n return text",
"def getHourColumn(self): \n return self.hourcol",
"def get_working_hour(self):\n working_hrs_id = self.search([('active', '=', True)])\n if working_hrs_id:\n return {\n 'biz_open_time': time(int(working_hrs_id.start_hour), int(working_hrs_id.start_minute), 0),\n 'biz_close_time': time(int(working_hrs_id.end_hour), int(working_hrs_id.end_minute), 0),\n 'holiday_list': {line.occ_date: line.name for line in working_hrs_id.non_working_days_line},\n 'deadline_revise_percentage': working_hrs_id.deadline_revise_percentage,\n }\n raise ValidationError(\"Working Hour configuration is missing!\")",
"def hour(self):\n return \"%s:00:00\" % self.timestamp[:13]",
"def get_hours(self, date = \"\"):\n\n if date == \"\":\n DATE = datetime.today()\n else:\n year, month, day = date.split('-')\n DATE = datetime(int(year), int(month), int(day))\n\n s = requests.get(\"https://api.wdpro.disney.go.com/facility-service/schedules/{}?date={}-{}-{}\".format(self.__id, DATE.year, self.__formatDate(str(DATE.month)), self.__formatDate(str(DATE.day))), headers=getHeaders())\n data = json.loads(s.content)\n\n operating_hours_start = None\n operating_hours_end = None\n extra_hours_start = None\n extra_hours_end = None\n\n try:\n for i in range(len(data['schedules'])):\n if data['schedules'][i]['type'] == 'Operating':\n operating_hours_start = datetime(DATE.year, DATE.month, DATE.day, int(data['schedules'][i]['startTime'][0:2]), int(data['schedules'][i]['startTime'][3:5]))\n if int(data['schedules'][i]['endTime'][0:2]) >= 0 and int(data['schedules'][i]['endTime'][0:2]) <= 7:\n DATETEMP = DATE + timedelta(days=1)\n operating_hours_end = datetime(DATETEMP.year, DATETEMP.month, DATETEMP.day, int(data['schedules'][i]['endTime'][0:2]), int(data['schedules'][i]['endTime'][3:5]))\n else:\n operating_hours_end = datetime(DATE.year, DATE.month, DATE.day, int(data['schedules'][i]['endTime'][0:2]), int(data['schedules'][i]['endTime'][3:5]))\n\n if data['schedules'][i]['type'] == \"Special Ticketed Event\":\n extra_hours_start = datetime(DATE.year, DATE.month, DATE.day, int(data['schedules'][i]['startTime'][0:2]), int(data['schedules'][i]['startTime'][3:5]))\n if int(data['schedules'][i]['endTime'][0:2]) >= 0 and int(data['schedules'][i]['endTime'][0:2]) <= 7:\n DATETEMP = DATE + timedelta(days=1)\n extra_hours_end = datetime(DATETEMP.year, DATETEMP.month, DATETEMP.day, int(data['schedules'][i]['endTime'][0:2]), int(data['schedules'][i]['endTime'][3:5]))\n else:\n operating_hours_end = datetime(DATE.year, DATE.month, DATE.day, int(data['schedules'][i]['endTime'][0:2]), int(data['schedules'][i]['endTime'][3:5]))\n\n except KeyError:\n pass\n return operating_hours_start, operating_hours_end, extra_hours_start, extra_hours_end",
"def active_hours(self):\n return self._active_hours",
"def post(self, request):\n\t\tserializer = PeopleSerializer.WorkingHourSerializer(data=request.data)\n\t\tif serializer.is_valid():\n\t\t\tserializer.save()\n\t\t\treturn Response(serializer.data, status=200)\n\t\treturn Response(serializer.errors, status=422)",
"def hour(self):\n return self._hour"
] |
[
"0.6702672",
"0.63559496",
"0.6134469",
"0.60062844",
"0.58493066",
"0.5845087",
"0.5821502",
"0.58081955",
"0.57617134",
"0.5753844",
"0.5742273",
"0.5733102",
"0.57142144",
"0.56817853",
"0.56517273",
"0.5609031",
"0.558943",
"0.5580721",
"0.5514128",
"0.549668",
"0.5485034",
"0.5459778",
"0.5456188",
"0.543028",
"0.54280025",
"0.5410326",
"0.5409438",
"0.5405698",
"0.5384301",
"0.5382816"
] |
0.72261083
|
0
|
View for rendering events feed data as json.
|
def json_events(request):
if request.method == 'GET':
ttrss_url = request.GET['feed']
# need xml for this.
university_url = 'http://events.uchicago.edu/widgets/rss.php?key=47866f880d62a4f4517a44381f4a990d&id=48'
n = datetime.datetime.now()
return JsonResponse(
{
'events': flatten_events(get_events(university_url, ttrss_url, n, n + relativedelta(years=1), False))
}
)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def view_events():\n result = get_events_helper(Event)\n return jsonify(result[0]), result[1]",
"def get():\n return jsonify({'events': 'Events API'}), 200",
"def get_event():\n json_data = request.args or {}\n return make_response(jsonify({ \"data\" : Event.get_events(json_data)}))",
"def to_json(self):\n self._load_all_events()\n return json.dumps(\n [x.to_dict() for events in self._events.values() for x in events], indent=2\n )",
"def show_events_list():\r\n\tevents_list = Page.objects.filter(tags='events').order_by('-created')\r\n\treturn {'events_list': events_list}",
"def list(self):\n return JSONResponse(self.request).data(items=self._get_agenda_items()).dump()",
"def update_events(request):\n events_data = request.data\n events_manager.deserialize_event(events_data)\n # print(events_manager.serialize_events())\n events_manager.apply()\n return JsonResponse({'nodes': []})",
"def show_events(request):\n event_list = Event.objects.order_by('-date')\n\n event_form = EventForm()\n\n context = {'events': event_list, 'form': event_form}\n return render(request, 'metro_app/events_view.html', context)",
"def json_news(request):\n if request.method == 'GET':\n feed = request.GET['feed']\n return JsonResponse(\n {\n 'news': get_news(feed),\n }\n )",
"def json(self):\n return {\n 'uri': self.view_uri,\n 'created': time.strftime('%c', time.gmtime(self.created)),\n 'created_timestamp': self.created,\n 'exception_type': str(self.exc_type),\n 'exception': str(self.exc_value),\n }",
"def info_event_json(event_id):\n event = Event.query.filter_by(id=event_id).first_or_404()\n timeuntil = timesince(event.countdown, until=True)\n return jsonify(event=event.data, timeuntil=timeuntil)",
"def events_to_json(events):\n result = {}\n index = 0\n for e in events:\n event = {}\n event['id'] = e.id\n event['name'] = e.name\n event['datetime'] = e.datetime\n event['fee'] = e.fee\n event['max_capacity'] = e.max_capacity\n event['min_capacity'] = e.min_capacity\n result['event'+str(index)] = event\n index += 1\n return result",
"def json_view(self, recursive=False):\n\n context = self.context.aq_inner\n data = self.export(context, recursive=recursive)\n pretty = json.dumps(data, sort_keys=True, indent=4)\n self.request.response.setHeader(\"Content-type\", \"application/json\")\n return pretty",
"def list(self, request):\n user = request.auth.user\n events = Event.objects.order_by('datetime')\n search_text = self.request.query_params.get('q', None)\n if search_text is not None:\n events = events.filter(\n Q(cost__icontains=search_text)\n )\n search_text = self.request.query_params.get('date', None)\n if search_text is not None:\n events = events.filter(\n Q(datetime__icontains=search_text)\n )\n for event in events:\n event.bookmarked = None\n try:\n Bookmark.objects.get(event=event, user=user)\n event.bookmarked = True\n except Bookmark.DoesNotExist:\n event.bookmarked = False\n # game = self.request.query_params.get('gameId', None)\n # if game is not None:\n # events = events.filter(game__id=game)\n serializer = EventSerializer(\n events, many=True, context={'request': request})\n return Response(serializer.data)",
"def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n 'event_url' : self.event_url,\n 'event_thumbnail_url' : self.event_thumbnail_url,\n 'description' : self.description,\n 'ticket_price' : self.ticket_price,\n 'start_date' : str(self.start_date),\n 'featued' : self.featured\n }",
"def get_events_json(self, query_string, **kwargs):\n\n response = self._search_events(query_string, output_format=\"json\", **kwargs)\n\n return response.text",
"def list(request, template='events/list.html'):\n return render(request, template, {\n 'events': Event.objects.get_upcoming().order_by('start_date'),\n })",
"def events(self) -> [redirect, HTMLBody]:\n\t\t# Get all events and split into 2 groups\n\t\teventsl, eventsr = prepare_events(get_events())\n\t\treturn render_template(\"events.jinja2\", eventsl=eventsl, eventsr=eventsr)",
"def myevents(self, request, pk=None):\n user = request.auth.user\n myevents = user.events\n serializer = EventSerializer(\n myevents, many=True, context={'request': request})\n return Response(serializer.data)",
"def userevent_list(request):\n if request.method == 'GET':\n with sqlite3.connect(Connection.db_path) as conn:\n conn.row_factory = sqlite3.Row\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n SELECT\n full_name,\n user_id,\n id,\n title,\n description,\n date,\n time,\n name\n FROM\n EVENTS_BY_USER\n \"\"\")\n\n dataset = db_cursor.fetchall()\n\n # Take the flat data from the database, and build the\n # following data structure for each gamer.\n #\n # {\n # 1: {\n # \"id\": 1,\n # \"full_name\": \"Admina Straytor\",\n # \"games\": [\n # {\n # \"id\": 1,\n # \"title\": \"Foo\",\n # \"maker\": \"Bar Games\",\n # \"skill_level\": 3,\n # \"number_of_players\": 4,\n # \"game_type_id\": 2\n # }\n # ]\n # }\n # }\n\n events_by_user = {}\n\n for row in dataset:\n uid = row['user_id']\n if uid in events_by_user:\n events_by_user[uid]['events'].append({\n \"id\": row['id'],\n \"title\": row['title'],\n \"description\": row['description'],\n \"date\": row['date'],\n \"time\": row['time'],\n \"game_name\": row[\"name\"]\n })\n else:\n events_by_user[uid] = {\n \"gamer_id\": uid,\n \"full_name\": row['full_name'],\n \"events\": [{\n \"id\": row['id'],\n \"title\": row['title'],\n \"description\": row['description'],\n \"date\": row['date'],\n \"time\": row['time'],\n \"game_name\": row[\"name\"]\n }]\n }\n\n events = events_by_user.values() \n\n template = 'users/list_with_events.html'\n context = {\n 'userevent_list': events\n }\n\n return render(request, template, context)",
"def list_all_events(as_format='json'):\n eventlist = []\n for event in Event.query \\\n .filter_by(is_hidden=False, lock_resources=False) \\\n .order_by(Event.starts_at.desc()).all():\n eventlist.append(event.data)\n if as_format == 'json':\n return jsonify(events=eventlist)\n headers = {'Content-Disposition': 'attachment; filename=events.csv'}\n csvlist = gen_csv(eventlist)\n return Response(stream_with_context(csvlist),\n mimetype='text/csv',\n headers=headers)",
"def json_frapp(request):\n from pv.settings import MEDIA_URL\n\n if request.GET.get('date') == None:\n start = datetime.combine(date.today(), time(0, 0))\n else:\n start = datetime.combine( datetime.strptime(request.GET.get('date'), '%Y-%m-%d').date(), time(0, 0))\n\n end = datetime.combine(start, time(23, 59))\n\n timeslots = TimeSlot.objects.filter(start__gte=start,start__lte=end).select_related('show').order_by('start')\n\n\n '''Generate categories object for output'''\n\n categories = Category.objects.all()\n categories_output = []\n\n for c in categories:\n c_entry = {\n 'id': c.id,\n 'color': c.color.replace('#', '').upper(),\n 'namedisplay': c.category,\n 'description': c.description\n }\n\n categories_output.append(c_entry)\n\n # Get all series for timeslots\n series = set()\n for ts in timeslots:\n series.add(ts.show)\n\n\n '''Generate series object for output'''\n\n series_output = []\n\n for s in series:\n metainfos = []\n metainfos.append({ 'key': 'ProduzentIn', 'value': ', '.join(ts.show.hosts.values_list('name', flat=True)) })\n metainfos.append({ 'key': 'E-Mail', 'value': ', '.join(ts.show.hosts.values_list('email', flat=True)) })\n\n image = '' if s.image.name == None or s.image.name == '' else str(get_current_site(request)) + MEDIA_URL + s.image.name\n url = '' if s.website == None or s.website == '' else s.website\n\n # Get active schedules for the given date\n # But include upcoming single timeslots (with rrule_id=1)\n schedules = Schedule.objects.filter( Q(show=s.id,is_repetition=False) &\n (\n Q(rrule_id__gt=1,dstart__lte=start,until__gte=start) |\n Q(rrule_id=1,dstart__gte=start)\n )\n )\n\n schedules_repetition = Schedule.objects.filter( Q(show=s.id,is_repetition=True) &\n (\n Q(rrule_id__gt=1,dstart__lte=start,until__gte=start) |\n Q(rrule_id=1,dstart__gte=start)\n )\n )\n\n broadcastinfos = ''\n\n if not schedules.exists():\n continue\n\n for schedule in schedules:\n broadcastinfos = broadcastinfos + generate_frapp_broadcastinfos(schedule)\n\n if schedules_repetition.exists():\n broadcastinfos = broadcastinfos + 'Wiederholung jeweils:'\n for schedule in schedules_repetition:\n broadcastinfos = broadcastinfos + generate_frapp_broadcastinfos(schedule)\n\n s_entry = {\n 'id': s.id,\n 'categoryid': s.category.values_list('id', flat=True)[0],\n 'color': s.category.values_list('color', flat=True)[0].replace('#', '').upper(),\n 'namedisplay': s.name,\n 'description': s.description,\n 'url': url,\n 'image': image,\n 'broadcastinfos': broadcastinfos,\n 'metainfos': metainfos\n }\n\n series_output.append(s_entry)\n\n\n '''Generate shows object for output'''\n\n shows_output = []\n\n for ts in timeslots:\n\n is_repetition = ' ' + _('REP') if ts.schedule.is_repetition is 1 else ''\n namedisplay = ts.show.name + is_repetition\n description = ts.show.description\n url = str(get_current_site(request)) + '/shows/' + ts.show.slug\n urlmp3 = ''\n\n # If there's a note to the timeslot use its title, description and url\n try:\n note = Note.objects.get(timeslot=ts.id)\n namedisplay = note.title + is_repetition\n description = note.content\n url = str(get_current_site(request)) + '/notes/' + note.slug\n urlmp3 = note.audio_url\n except ObjectDoesNotExist:\n pass\n\n ts_entry = {\n 'id': ts.id,\n 'seriesid': ts.show.id,\n 'datetimestart': ts.start.strftime('%d.%m.%Y %H:%M:%S'),\n 'datetimeend': ts.end.strftime('%d.%m.%Y %H:%M:%S'),\n 'namedisplay': namedisplay,\n 'description': description,\n 'url': url,\n 'urlmp3': urlmp3,\n }\n\n shows_output.append(ts_entry)\n\n output = {}\n 
output['categories'] = categories_output\n output['series'] = series_output\n output['shows'] = shows_output\n\n return HttpResponse(json.dumps(output, ensure_ascii=False).encode('utf8'),\n content_type=\"application/json; charset=utf-8\")",
"def serialize(self):\n return {\n 'id' : self.id,\n #had to change to 'title' for full calendar, might change\n 'title' : self.name,\n 'host' : self.created_by,\n 'start' : self.start_on.isoformat(),\n 'end' : self.end_on.isoformat(),\n 'description' : self.description,\n 'color' : 'blue',\n }",
"def event_activity_json(event_id):\n limit = request.args.get('limit') or 50\n q = request.args.get('q') or None\n if q and len(q) < 3:\n q = None\n return jsonify(activities=get_event_activities(event_id, limit, q))",
"def data_json(request):\n json_data = []\n for resource in Resource.objects.all():\n record = {} \n record['title'] = resource.name\n record['description'] = resource.description\n record['keyword'] = resource.csw_keywords.split(',')\n record['modified'] = resource.last_updated\n record['publisher'] = resource.organization\n record['contactPoint'] = resource.metadata_contact\n record['mbox'] = resource.contact_email\n record['identifier'] = resource.csw_identifier\n if resource.is_published:\n record['accessLevel'] = 'public'\n else:\n record['accessLevel'] = 'non-public'\n\n json_data.append(record)\n\n return HttpResponse(json.dumps(json_data), 'application/json')",
"def render_to_json_response(self, context, **response_kwargs):\n return JsonResponse(self.get_data(context))",
"def apigw_event():\n with open(\"events/event.json\") as json_file:\n return json.load(json_file)",
"def info_current_event_json():\n event = Event.query.filter_by(is_current=True).first() or \\\n Event.query.order_by(Event.id.desc()).first_or_404()\n timeuntil = timesince(event.countdown, until=True)\n return jsonify(event=event.data, timeuntil=timeuntil)",
"def home_feeds(request):\n result = {}\n \n result['feeds'] = []\n\n u = request.user\n\n\n # get other people's feeds, filter by friends if in social group\n feeds = Feed.objects.exclude(actor=u).order_by('-timestamp')\n result['feeds'] = [ f.get_json(me=u, android=True) for f in feeds ]\n\n return JSONHttpResponse(result)",
"def events_info(request):\n \n global input\n \n if request == 'event-based':\n client_neries = Client_neries()\n \n events = client_neries.getEvents(min_datetime=input['min_date'], \\\n max_datetime=input['max_date'], min_magnitude=input['min_mag'], \\\n max_magnitude=input['max_mag'], min_latitude=input['evlatmin'], \\\n max_latitude=input['evlatmax'], min_longitude=input['evlonmin'], \\\n max_longitude=input['evlonmax'], min_depth = input['min_depth'], \\\n max_depth=input['max_depth'], max_results=input['max_result'])\n \n for i in range(0, len(events)):\n events[i]['t1'] = events[i]['datetime'] - input['preset']\n events[i]['t2'] = events[i]['datetime'] + input['offset']\n \n elif request == 'continuous':\n m_date = UTCDateTime(input['min_date'])\n M_date = UTCDateTime(input['max_date'])\n \n t_cont = M_date - m_date\n \n events = []\n \n if t_cont > input['interval']:\n num_div = int(t_cont/input['interval'])\n t_res = t_cont - num_div*input['interval']\n \n for i in range(0, num_div):\n events.append({'author': 'NAN', 'event_id': 'continuous' + str(i), \\\n 'origin_id': -12345.0, 'longitude': -12345.0, \\\n 'datetime': m_date + i*input['interval'], \\\n 't1': m_date + i*input['interval'],\\\n 't2': m_date + (i+1)*input['interval'] + 60.0,\\\n 'depth': -12345.0, 'magnitude': -12345.0, \\\n 'magnitude_type': 'NAN', 'latitude': -12345.0, \\\n 'flynn_region': 'NAN'})\n \n events.append({'author': 'NAN', 'event_id': 'continuous' + str(i+1), \\\n 'origin_id': -12345.0, 'longitude': -12345.0, \\\n 'datetime': m_date + (i+1)*input['interval'], \\\n 't1': m_date + (i+1)*input['interval'],\\\n 't2': M_date,\\\n 'depth': -12345.0, 'magnitude': -12345.0, \\\n 'magnitude_type': 'NAN', 'latitude': -12345.0, \\\n 'flynn_region': 'NAN'})\n else:\n events.append({'author': 'NAN', 'event_id': 'continuous0', \\\n 'origin_id': -12345.0, 'longitude': -12345.0, \\\n 'datetime': m_date, \\\n 't1': m_date,\\\n 't2': M_date,\\\n 'depth': -12345.0, 'magnitude': -12345.0, \\\n 'magnitude_type': 'NAN', 'latitude': -12345.0, \\\n 'flynn_region': 'NAN'})\n\n return events"
] |
[
"0.76628214",
"0.67185247",
"0.6591272",
"0.6508852",
"0.6443426",
"0.62528884",
"0.6215547",
"0.6096349",
"0.60427344",
"0.604139",
"0.5985696",
"0.5972402",
"0.5937236",
"0.5932184",
"0.59189314",
"0.58994985",
"0.58941627",
"0.5889959",
"0.58717847",
"0.58609265",
"0.5860657",
"0.58587974",
"0.5854207",
"0.58268523",
"0.5809858",
"0.5797601",
"0.5782626",
"0.57593095",
"0.57396215",
"0.5730987"
] |
0.778918
|
0
|
View for rendering news feed data as json.
|
def json_news(request):
if request.method == 'GET':
feed = request.GET['feed']
return JsonResponse(
{
'news': get_news(feed),
}
)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def top_news():\n data = get_top_news()\n return jsonify(data)",
"def news():\n\n # ensure parameters are present\n # geo = request.args.get(\"geo\")\n geo = '95060'\n if not geo:\n raise RuntimeError(\"missing geo\")\n\n # lookup articles and store them as JSON array\n article_list = lookup(geo)\n\n # TODO\n print(article_list)\n news = jsonify(article_list) \n print(news)\n # return render_template(\"index.html\")\n return article_list",
"def json_events(request):\n if request.method == 'GET':\n ttrss_url = request.GET['feed']\n\n # need xml for this. \n university_url = 'http://events.uchicago.edu/widgets/rss.php?key=47866f880d62a4f4517a44381f4a990d&id=48'\n\n n = datetime.datetime.now()\n return JsonResponse(\n {\n 'events': flatten_events(get_events(university_url, ttrss_url, n, n + relativedelta(years=1), False))\n }\n )",
"def json_format(data):\n return {\n 'Title': data[\"title\"],\n 'Publication date': data['pubDate'],\n 'News link': data['link'],\n 'Image link': data['media'],\n }",
"def GET(self, *args):\n all_news= self.get_all_news()\n all_news.sort( key=lambda n : n['date'], reverse=True)\n if len(args):\n n_last=int(args[0])\n all_news = all_news[:n_last]\n\n return json.dumps(all_news)",
"def home_feeds(request):\n result = {}\n \n result['feeds'] = []\n\n u = request.user\n\n\n # get other people's feeds, filter by friends if in social group\n feeds = Feed.objects.exclude(actor=u).order_by('-timestamp')\n result['feeds'] = [ f.get_json(me=u, android=True) for f in feeds ]\n\n return JSONHttpResponse(result)",
"def show_news_list():\r\n\tnews_list = Page.objects.filter(tags='news').order_by('-created')\r\n\treturn {'news_list': news_list}",
"def get_all_feeds(request):\n feed = Feeds.objects.all()\n\n\n\n\n serializer = FeedsSerializer(feed,many=True)\n return JsonResponse({\"FeedList\":serializer.data,\"ResponseCode\": \"200\",\"ResponseMessage\":\"Successfully\"}, safe=False)",
"def newsfeed(request):\n article_list = Article.objects.order_by('published_date')\n context = {'article_list': article_list}\n return render(request, 'sacms/newsfeed.html', context)",
"def to_json(self):\n return {\n \"item_name\": self.item_name,\n \"summary\": self.summary,\n \"content\": self.content,\n \"date_published\": self.date_published,\n \"item_slug\": self.item_slug,\n \"category_name\": self.category_name,\n }",
"def get_feed():\n return jsonify(dict({\n \"result\": mongo.get_hpfeed(),\n \"code\": 200\n }))",
"def json(self):\n if self.valid:\n return {\n 'articleID': self._id,\n 'ticker_symbol': self.ticker,\n 'published_date': self.pub_date,\n 'author_name': self.author,\n 'title': self.title,\n 'text': self.text,\n 'num_likes': 0,\n 'includes_symbols': self.includes\n }\n\n return {}",
"def news(request):\n articles = News.objects.all()\n return render(request, 'news.html', {\"articles\": articles})",
"async def news(self):\n url = f\"https://newsapi.org/v2/top-headlines?country=nz&apiKey={self.bot.news_api_key}\"\n async with ClientSession() as session:\n async with session.get(url) as response:\n r = await response.json()\n firstArticle = r[\"articles\"][0]\n nSource = firstArticle[\"source\"][\"name\"]\n nTitle = firstArticle[\"title\"]\n nTimestamp = firstArticle[\"publishedAt\"]\n embed = discord.Embed(\n title=f\"News Title: {nTitle}\", description=f\"News Source: {nSource}\"\n )\n embed.add_field(name=\"News Content\", value=firstArticle[\"description\"])\n embed.set_image(url=firstArticle[\"urlToImage\"])\n embed.set_footer(text=f\"News Timestamp: {nTimestamp}\")\n\n channel = self.bot.get_channel(self.bot.main_channel_id)\n await channel.send(embed=embed)",
"def news_feed(request):\n\n all_friends = get_all_friends(request)\n news_feed = get_news_feed(request)\n user_profile = get_users_profile(request.user.id)\n\n context = {\n 'news_feed': news_feed,\n 'user_profile': user_profile,\n 'status_form': StatusForm,\n }\n\n return render(request, 'status/news_feed.html', context)",
"def data_json(request):\n json_data = []\n for resource in Resource.objects.all():\n record = {} \n record['title'] = resource.name\n record['description'] = resource.description\n record['keyword'] = resource.csw_keywords.split(',')\n record['modified'] = resource.last_updated\n record['publisher'] = resource.organization\n record['contactPoint'] = resource.metadata_contact\n record['mbox'] = resource.contact_email\n record['identifier'] = resource.csw_identifier\n if resource.is_published:\n record['accessLevel'] = 'public'\n else:\n record['accessLevel'] = 'non-public'\n\n json_data.append(record)\n\n return HttpResponse(json.dumps(json_data), 'application/json')",
"def render(self):\n with open(self.config[\"home.newssource\"]) as fp:\n stories = json.load(fp)\n\n filename = self.config[\"home.template\"]\n parts = dict(stories=stories)\n\n return self.render_template(filename, parts)",
"def news_handle(news_json):\n #Sets all the variables to global space to be used later.\n global title_1, title_2, author_1, author_2, source_1, source_2, url_1_final, url_2_final\n #Handles data from API and assigns data to variables.\n news_data = news_json['articles']\n article_1 = news_data[0]\n article_2 = news_data[1]\n title_1 = article_1['title']\n author_1 = article_1['author']\n source_1 = article_1['source']['name']\n url_1 = article_1['url']\n url_1_final = '<a href=\"{}\">Read More</a>'.format(url_1)\n title_2 = article_2['title']\n author_2 = article_2['author']\n source_2 = article_2['source']['name']\n url_2 = article_2['url']\n url_2_final = '<a href=\"{}\">Read More</a>'.format(url_2)\n return",
"def GET(self):\n web.header(\"Content-Type\",\"application/json; charset=utf-8\")\n\n data = web.input(module=\"module\", start=\"start\", num=\"num\", type=\"type\")\n module = data[\"module\"]\n start = data[\"start\"]\n num = data[\"num\"]\n type_ = data[\"type\"]\n\n module = (1 if module == \"module\" else module)\n start = (1 if start == \"start\" else start)\n num = (1 if num == \"num\" else num)\n\n news = api.get_news_fromDB(int(module), int(start), int(num))\n\n if type_ != \"html\":\n return json.dumps(news)\n else:\n web.header(\"Content-Type\",\"text/html; charset=utf-8\")\n html = \"\"\n for item in news:\n html = html + item[\"maindiv\"]\n return html",
"def json(self):\n return {\n 'author': self.author,\n 'email': self.email,\n 'display_email': self.display_email,\n 'title': self.title,\n 'trailer_path': self.trailer_path,\n 'date': self.date,\n 'link': self.link,\n '_id': self._id\n }",
"def tweet_list_view_pure_django(request, *args, **kwargs):\n\n qs = Tweet.objects.all()\n # tweets_list = [\n # {\"id\": x.id, \"content\": x.content, \"likes\": random.randint(0, 129)} for x in qs\n # ]\n\n # @Anyi use this line of code instead of the above line\n tweets_list = [x.serialize() for x in qs]\n\n data = {\"isUser\": False, \"tweet_list_response\": tweets_list}\n # @Anyi this is a new way of returning our data\n # instead of HttpResponse() or render()\n # and since we want to make our page as dynamic as possible\n return JsonResponse(data)",
"def data_json(self, extra_context=None, publish=False):\n if not self.project.CREATE_JSON:\n # nothing to see here, but the right mimetype\n return jsonify()\n\n if not self.data:\n # this sets site.data by spreadsheet or gdoc\n self.get_context(publish)\n\n return jsonify(self.data)",
"def get(self):\n\n return {\"message\": \"Welcome to the news API. \"}",
"def json_posts_latest():\n posts = posts_base.order_by(Post.pubdate.desc())[:app.config['FEEDITEMS']]\n out = {'posts': []}\n for post_result in posts:\n post_dict = get_public_post_dict(post_result[0], post_result[2])\n out['posts'].append(post_dict)\n\n return jsonify(out)",
"def display(request):\n data = {}\n about = About.objects.all()\n if about:\n data['success']=1\n data['message']=\"Comments available\"\n data['about']=[]\n for i in range(len(about)):\n data['about'].append(\n {'about':about[i].about,\n 'about_id':about[i].id,\n })\n return JsonResponse(data)\n else:\n data['success']=0\n data['message']='no about available'\n return JsonResponse(data)",
"def newsList(request):\n\n news_count = New.objects.count() # Pocet vsech zaznamu novinek\n news_list = New.objects.all().order_by(\"date\") # Sort by date ... and only part of list\n # misto vsech zaznamu ziskat jen ty v intervalu start - stop -> API\n\n pictureOfWeek = PhotoOfWeek.objects.last()\n context = {'news_list': news_list, 'news_count': news_count, 'pictureOfWeek': pictureOfWeek}\n return render(request, 'news/newsList.html', context)",
"def format(self, *args):\n\t\tweb.header('Content-Type', 'application/json; charset=utf-8')\n\t\treturn json.dumps(self.content)",
"def get_recent_news_items():\n news_item_count = request.args.get('newsItemCount') or 3\n try:\n animal_news = AnimalNews.get_printable_news_items_all_animals(news_item_count)\n return jsonify(message=animal_news), 200\n except Exception as e:\n print(e)\n return jsonify(message='{}'.format(e)), 501",
"def news_fetch(region,news_key):\n #Allows for customizable API key and weather location.\n url = (f\"http://newsapi.org/v2/top-headlines?country={region}&apiKey={news_key}\")\n #Gets API with requests and convert to .json\n news_api = requests.get(url)\n news_json = news_api.json()\n return news_json",
"def news():\r\n with open('config.json', 'r') as cfile:\r\n config = json.load(cfile)\r\n news_api_key = config[\"news_api_key\"]\r\n response = requests.get(\"https://newsapi.org/v2/top-headlines?\"\r\n \"sources=bbc-news&apiKey=\" + news_api_key)\r\n resp_json = response.json()\r\n with open(\"news.json\", 'w') as file:\r\n json.dump(resp_json, file)\r\n file.close()"
] |
[
"0.673006",
"0.66394603",
"0.6454153",
"0.6373204",
"0.6363174",
"0.63411576",
"0.6298449",
"0.61722803",
"0.61526084",
"0.60082966",
"0.59839356",
"0.5969505",
"0.5918859",
"0.5916364",
"0.5914569",
"0.5899843",
"0.58803356",
"0.5869351",
"0.58558553",
"0.5835319",
"0.58160734",
"0.5789556",
"0.578666",
"0.5769338",
"0.57673436",
"0.575199",
"0.57514006",
"0.5728741",
"0.5715693",
"0.57057583"
] |
0.78444064
|
0
|
View for retrieving the chat status for Ask a Librarian pages. Returns json.
|
def chat_status(request):
if request.method == 'GET':
ask_name = request.GET['name']
status = get_chat_status_and_css(ask_name)
return JsonResponse(
{
'chat_status': status[0],
'chat_css': status[1],
}
)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def chat_status(request):\n team = Hunt.objects.get(is_current_hunt=True).team_from_user(request.user)\n if request.method == 'GET' and request.is_ajax():\n if(team is None):\n return render(request, 'access_error.html', {'reason': \"team\"})\n status = team.num_waiting_messages\n return HttpResponse(json.dumps({\"num_messages\": status}))\n else:\n return HttpResponseNotFound()",
"def get_conversation(request):\n collected_values = {}\n\n # Only allow GET requests for this endpoint\n if request.method != 'GET':\n collected_values[\"success\"] = False\n collected_values[\"errmsg\"] = \"Wrong HTTP verb\"\n return JsonResponse(collected_values, status=400)\n\n # Extract and form params\n uid = request.GET['uid']\n oid = request.GET['oid']\n token = request.GET['token']\n ts_query = request.GET['ts']\n time_user_seen = request.GET.get('tus')\n limit = int(request.GET['limit'])\n\n if ts_query == \"\":\n ts_query = timezone.now()\n\n change_user_seen = False\n if time_user_seen == \"true\":\n change_user_seen = True\n\n # Check if token is valid\n is_valid, collected_values[\"token\"] = check_auth(uid, token, timezone.now())\n if not is_valid:\n collected_values[\"success\"] = False\n collected_values[\"errmsg\"] = \"Invalid Token\"\n return JsonResponse(collected_values, status=400)\n\n # Collect all messages sent by two users in question listed by created at time\n message_query_set = Messages.objects.filter(\n Q(user_id=uid, other_id=oid) |\n Q(other_id=uid, user_id=oid)).order_by('-created_at')[:limit]\n\n # Collect all messages from query\n test_list = []\n for message in message_query_set:\n if change_user_seen:\n message.time_user_seen = timezone.now()\n message.save()\n test_list.append(message.get_map())\n\n # Collect return values\n collected_values[\"messages\"] = test_list\n collected_values[\"success\"] = True\n\n LOGGER.info(\"Get Conversation Result: %s\", collected_values)\n return JsonResponse(collected_values, status=200)",
"def load_messages(request):\n thread = models.MessageThread.objects.get(hash_id=request.GET['id'])\n \n # check if user is a part of this chat\n if not request.user in thread.clients.all():\n return HttpResponse(status=403)\n\n # query for messages filter\n q = [Q(thread=thread)]\n if 'before' in request.GET:\n q.append(Q(date__lt=int(request.GET['before'])))\n\n # query messages matching filter\n messages = models.Message.objects.filter(*q).order_by('-id')\n messages_data = serializers.MessageListSerializer(messages[:30]).data\n\n # mark any unread messages in chat as read\n thread.mark_read(request.user)\n return JsonResponse({\"messages\":messages_data,\"end\":messages.count() <= 30})",
"def get_conversation_list(request):\n collected_values = {}\n\n # Only accept GET requests for this endpoint\n if request.method != 'GET':\n collected_values[\"success\"] = False\n collected_values[\"errmsg\"] = \"Wrong HTTP verb\"\n return JsonResponse(collected_values, status=400)\n\n # Extract params\n uid = request.GET['uid']\n token = request.GET['token']\n limit = int(request.GET['limit']) # Force a limiter to see how many users to get\n\n # Check if the token is valid\n is_valid, collected_values[\"token\"] = check_auth(uid, token, timezone.now())\n if not is_valid:\n collected_values[\"success\"] = False\n collected_values[\"errmsg\"] = \"Invalid Token\"\n return JsonResponse(collected_values, status=400)\n\n # Maybe cache or find better way of getting most recent id's messaged\n # Do a walkthrough of all messages and count totals\n # Potential Improvement is to keep a mapping of all messages sent from users to users\n users = {}\n msg_sent = Messages.objects.filter(user_id=uid).order_by('-created_at')[:limit]\n msg_recieved = Messages.objects.filter(other_id=uid).order_by('-created_at')[:limit]\n for msg in msg_sent:\n if users.get(msg.other_id) is None:\n users[msg.other_id] = 1\n else:\n users[msg.other_id] += 1\n for msg in msg_recieved:\n if users.get(msg.user_id) is None:\n users[msg.user_id] = 1\n else:\n users[msg.user_id] += 1\n\n # Collect return values\n collected_values[\"users\"] = users\n collected_values[\"success\"] = True\n\n LOGGER.info(\"Get Conversation List Result: %s\", collected_values)\n return JsonResponse(collected_values, status=200)",
"def load_inbox(request):\n threads = models.MessageThread.objects.filter(clients=request.user).annotate(\n unread_count=Count('receipts',filter=Q(receipts__recipient=request.user))\n )\n thread_data = serializers.MessageThreadListSerializer(threads).data\n #user = userauth_models.User.objects.filter(username=request.user.username)\n #print(user.username)\n #print(get_channel_layer())\n #print(request.session['channel_name'])\n return JsonResponse({'threads':thread_data})",
"def chatlist(request):\n\n chats = get_chat_list()\n chat_list = pagination(request, chats, CHATS_PER_PAGE)\n\n dic = {'chatlist': chat_list}\n return render_to_response('whatsapp/chatlist.html', dic, context_instance=RequestContext(request))",
"async def status(self):\n cmd = subprocess.check_output([\"birdc\", \"show\", \"proto\"])\n for page in chat_formatting.pagify(cmd.decode(), ['\\n', ' '], shorten_by=12):\n await self.bot.say(chat_formatting.box(page))",
"def ajax_status(request):\r\n if not request.user.is_authenticated():\r\n raise PermissionDenied\r\n\r\n\r\n qs = UserPreference.objects.filter(\r\n user=request.user,\r\n key=NOTIFICATION_PREF_KEY\r\n )\r\n\r\n return HttpResponse(json.dumps({\"status\":len(qs)}), content_type=\"application/json\")",
"def test_get_project_chat_messages_passes(self):\n response = self.client.get(self.endpoint_url)\n response_body = response.get_json()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response_body, {\"chat\": [], \"pagination\": None})\n # to do: add comments and test again",
"def list(self, request, *args, **kwargs):\n return super(PublicChatViewSet, self).list(request, *args, **kwargs)",
"def chat(request):\n team = Hunt.objects.get(is_current_hunt=True).team_from_user(request.user)\n if request.method == 'POST':\n # There is data in the post request, but we don't need anything but\n # the message because normal users can't send as staff or other teams\n m = Message.objects.create(time=timezone.now(), text=request.POST.get('message'),\n is_response=False, team=team)\n team.num_waiting_messages = 0\n messages = [m]\n else:\n if(team is None):\n return render(request, 'access_error.html', {'reason': \"team\"})\n if(team.hunt.is_locked and not team.is_playtester_team):\n return render(request, 'access_error.html', {'reason': \"hunt\"})\n if request.is_ajax():\n messages = Message.objects.filter(pk__gt=request.GET.get(\"last_pk\"))\n else:\n messages = Message.objects\n messages = messages.filter(team=team).order_by('time')\n\n # The whole message_dict format is for ajax/template uniformity\n rendered_messages = render_to_string('chat_messages.html',\n {'messages': messages, 'team_name': team.team_name})\n message_dict = {team.team_name: {'pk': team.pk, 'messages': rendered_messages}}\n try:\n last_pk = Message.objects.latest('id').id\n except Message.DoesNotExist:\n last_pk = 0\n team.num_waiting_messages = 0\n\n team.save() # Save last_*_message vars\n context = {'message_dict': message_dict, 'last_pk': last_pk}\n if request.is_ajax() or request.method == 'POST':\n return HttpResponse(json.dumps(context))\n else:\n context['team'] = team\n return render(request, 'chat.html', context)",
"def get_status_messages(self):\n return self.data[\"allMessagesForFrontend\"][\"messages\"]",
"def get_messages(self):\n other_user_email = request.args.get('other_user_email')\n page = request.args.get('page')\n per_page = request.args.get('per_page')\n if not other_user_email or not page or not per_page:\n self.logger.debug(messages.MISSING_FIELDS_ERROR % \"query params\")\n return messages.ERROR_JSON % messages.MISSING_FIELDS_ERROR % \"query params\", 400\n email_token = auth.current_user()[0]\n page = int(page)\n per_page = int(per_page)\n # App sends starting with 1 but we start at 0\n page -= 1\n try:\n message_list, pages = self.friend_database.get_conversation(email_token, other_user_email, per_page, page)\n except NoMoreMessagesError:\n self.logger.debug(messages.NO_MORE_PAGES_ERROR)\n return messages.NO_MORE_PAGES_ERROR, 404\n message_list = [{k:v for k,v in m._asdict().items() if k != \"hidden_to\"} for m in message_list]\n for i in range(len(message_list)):\n message_list[i][\"timestamp\"] = message_list[i][\"timestamp\"].isoformat()\n return json.dumps({\"messages\": message_list, \"pages\": pages}), 200",
"def list_messages(chat_id):\n response = jsonify({\"messages\": list_messages_for_chat(chat_id)})\n return response",
"def get_unread_messages():\n mark_seen = request.args.get('mark_seen', True)\n unread_msg = g.driver.get_unread()\n\n if mark_seen:\n for msg in unread_msg:\n msg.chat.send_seen()\n\n return jsonify(unread_msg)",
"def single_chat(request, key):\n \n if not Messages.objects.using('msgstore').filter(key_remote_jid=key).count() > 0:\n return render_to_response('whatsapp/errors/404.html', context_instance=RequestContext(request)) \n\n msgs = get_chat_messages(key)\n msgs_list = pagination(request, msgs, MESSAGES_PER_PAGE)\n\n dic = {\n 'peer': key,\n 'chatmessages': msgs_list,\n 'gps': Messages.objects.using('msgstore').exclude((Q(longitude='0.0') | Q(latitude='0.0'))),\n 'media': Messages.objects.using('msgstore').exclude(media_url__isnull=True),\n 'PAG_TITLE': 'Conversation'\n }\n\n return render_to_response('whatsapp/chat.html', dic, context_instance=RequestContext(request))",
"def status_api(request):\n if request.method == 'GET':\n return JsonResponse({\n 'status': 'OK',\n 'version': __version__\n }, status=200)",
"def list(self, request, *args, **kwargs):\n return super(ConversationViewSet, self).list(request, *args, **kwargs)",
"def msgStatus():\n return jsonify({\"status\": \"OK\"})",
"def get_messages_json(self, limit=10):\n params = self.params\n params[\"limit\"] = limit\n response = requests.get(self.url + \"conversations.history\", params=params)\n return response.json()[\"messages\"]",
"def get_status(self):\n r = requests.get(self.base_url + '/status')\n return r.json()",
"def list(request, format=None):\r\n user_messages = request.user.profile.recent_messages()\r\n if format and format == '.json':\r\n data = {\r\n 'messages': [msg.to_json() for msg in user_messages],\r\n }\r\n return HttpResponse(json.dumps(data), mimetype='application/json')\r\n d = {\r\n 'objects': user_messages,\r\n 'title': 'Messages',\r\n }\r\n return render(request, 'usermessages/list.html', d)",
"def retrieve_conversation_history(username: Text) -> Text:\n history = inmemory_storage[username]\n if history:\n return jsonify(history)\n else:\n return jsonify(history), 404",
"def GET(self, request):\n timer = Timer()\n try:\n myaccount = get_account_info_for_current_user()\n\n resp_json = myaccount.json_data(\n use_abbr_week_month_day_format=True)\n log_success_response(logger, timer)\n return HttpResponse(json.dumps(resp_json))\n except Exception:\n return handle_exception(logger, timer, traceback)",
"def notification_list(request):\n try:\n validator = NotificationListValidator(request.GET)\n valid = validator.validate() # Validate the request\n if valid:\n current_user_id = request.user_id\n page_limit = int(request.GET['page_limit'])\n page_offset = int(request.GET['page_offset'])\n\n # notification listing\n notification_list = Notification.objects.filter(user_id=current_user_id).all().order_by('-created_on')[page_offset:page_limit+page_offset]\n serializer = NoitifcationListSerializer(notification_list, many=True)\n\n # set is_read = 1\n Notification.objects.filter(user_id=current_user_id).update(\n is_read=1\n )\n \n return Response({'data':serializer.data}, status=status.HTTP_200_OK)\n else:\n return Response({'error':requestErrorMessagesFormate(validator.get_message())}, status=status.HTTP_200_OK)\n except Exception as exception:\n logerror('notifications/views.py/notification_list', str(exception))\n return Response({'error':str(exception)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)",
"def get(self):\n self.write(\n '{\"error\": \"cryptochat-server main page, '\n 'please refer to /api/message/new or /api/message/updates\"}')",
"def get_messages(request, uuid):\n if request.is_ajax():\n user = request.user\n room = Room.objects.get(pk=uuid)\n if room.members.filter(pk=user.pk).exists():\n messages_qs = room.message_set.all()\n page = request.GET.get('page')\n paginator = Paginator(messages_qs, 20)\n try:\n selected = paginator.page(page)\n except PageNotAnInteger:\n selected = paginator.page(1)\n except EmptyPage:\n selected = []\n messages = []\n for message in selected:\n data = {\n 'sender': {\n 'name': str(message.sender),\n 'id': message.sender.pk\n },\n 'message': message.text,\n 'received_room_id': uuid,\n 'date_created': message.date_created.strftime(\"%d %b %Y %H:%M:%S %Z\")\n }\n messages.append(data)\n return JsonResponse(messages, safe=False)\n\n else:\n return Http404(_(\"Sorry! We can't find what you're looking for.\"))\n else:\n return Http404(_(\"Sorry! We can't find what you're looking for.\"))",
"def get_languages():\n\n api = (api_name, 'languages')\n\n response = make_request(api=api, action='get', **{})\n status_code = response.status_code\n content = response.text\n\n msg = str(status_code) + ' : ' + content\n \n logger.debug(\"response from spanglish languages: {}\".format(response))\n logger.debug(\"response statuscode from spanglish languages: {}\".format(status_code))\n\n click.echo(\"response message: %s \" % msg)",
"def count(request):\r\n n = request.user.profile.unread_message_count()\r\n data = {\r\n 'count': n,\r\n }\r\n return HttpResponse(json.dumps(data), mimetype='application/json')",
"def show_status():\n return jsonify({\"status\": \"OK\"})"
] |
[
"0.7056218",
"0.58047336",
"0.57761455",
"0.56156147",
"0.56094897",
"0.5595043",
"0.55575705",
"0.5555067",
"0.5535522",
"0.553365",
"0.5530453",
"0.55122775",
"0.540345",
"0.53645366",
"0.53573716",
"0.53215814",
"0.53145057",
"0.52810264",
"0.5262246",
"0.5239236",
"0.52295315",
"0.5229372",
"0.52216506",
"0.5219727",
"0.5216611",
"0.52143365",
"0.5213971",
"0.5187544",
"0.51789004",
"0.5177824"
] |
0.76308614
|
0
|
for a disk index k, the family L of lists of indices, and the index n of disk k within the family L, returns the list / set of indices of the disks whose speed will be modified by k
|
def influence(k, L, n):
    # L[n-1] is the set of disk indices still to check; C(k, 0) (defined elsewhere)
    # returns the indices of the disks in direct contact with disk k.
    try:
        to_check = L[n-1]  # set of indices
        contact_direct = C(k, 0)
        return list(to_check.intersection(contact_direct))
    except Exception:
        # out-of-range n or missing contact data: no disk is influenced
        return []
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _get_k_indices(self, ks):\n if self.staticneighs:\n idx_ks = ks\n else:\n idx_ks = [self.ks.index(e) for e in ks]\n return idx_ks",
"def generate_L(data_set, k, min_support):\n fptree = FPtree.fptree(data_set, min_support)\n print(\"pre:\",datetime.datetime.now())\n fptree.getPretable()\n print(\"pre:\",datetime.datetime.now())\n fptree.getRootTree()\n support_data = {}\n\n # L1,L2,support_L1 = adjacencyMatrix(data_set,min_support,support_data)\n # pretable = sorted(support_L1.items(), key=itemgetter(1, 0), reverse=True)\n # fptree = FPtree.fptree(data_set, min_support,pretable)\n # fptree.getRootTree()\n # Lksub1 = L2.copy()\n C1 = create_C1(data_set)\n # print (C1)\n # print (\"=====================\")\n L1 = generate_Lk_by_Ck(fptree,data_set, C1,1, min_support, support_data)\n Lksub1 = L1.copy()\n L = []\n L.append(Lksub1)\n for i in range(2, k+1):\n Ci = create_Ck(Lksub1, i)\n # print (Ci)\n # print (\"=====================\")\n Li = generate_Lk_by_Ck(fptree, data_set, Ci, i, min_support, support_data)\n Lksub1 = Li.copy()\n L.append(Lksub1)\n return L, support_data",
"def k_rank_approximate(doc_matrix, k):\n return []",
"def findknn(xTr,xTe,k):\n\n # YOUR CODE HERE\n if k > len(xTr):\n k = len(xTr)\n \n D=l2distance(xTe, xTr)\n (m,n) = D.shape\n \n indices = []\n dists = []\n for i in range(m):\n smallest_indices = np.argsort(D[i])\n ind = smallest_indices[:k]\n dis = D[i,smallest_indices[:k]]\n indices.append(ind)\n dists.append(dis)\n \n indices = np.transpose(np.array(indices))\n dists = np.transpose(np.array(dists))\n return indices, dists",
"def combinarink(list, k):\n global visited\n global indexes\n visited = [0 for x in range(0, len(list) + 1)] # init with 0\n indexes = [x for x in range(0, len(list) + 1)] # init indexes with 0...n-1\n output = combinari(1, len(list), k, list, [])\n print (output)",
"def _get_neighs_list_dynamic(self, k_is=[0]):\n neighs = [self.idxs[k_i] for k_i in k_is]\n return neighs",
"def gkm_rc_indices(l=4, k=3):\n names = gkm_name(l=l, k=k, rev_comp=False)\n collect_seqs = set() # Contains the seqs and rev comps added thus far\n first_index, second_index = [], []\n for i, kmer in enumerate(names):\n if kmer not in collect_seqs:\n collect_seqs.add(kmer) # Add kmer and its RC so we don't process it again\n collect_seqs.add(reverse_complement(kmer))\n first_index.append(i) # Add the pair indices\n second_index.append(names.index(reverse_complement(kmer)))\n assert len(first_index) == len(second_index)\n return np.vstack((first_index, second_index)).astype(int)",
"def _set_neighs_list_list_list(self, key):\n self.ks = list(range(len(key))) if self.ks is None else self.ks\n if self._constant_neighs:\n self.idxs = np.array(key)\n else:\n self.idxs = key\n if len(self.idxs[0]) != len(self.iss):\n self.iss = list(range(len(self.idxs[0])))\n if self.staticneighs:\n self.idxs = self.idxs[0]\n self._setted = True",
"def k(self):\n self.kTable()",
"def indices_hkl(self, H, K, L):\n from cctbx import miller\n _symm_equiv = miller.sym_equiv_indices(self.sg, (H, K, L))\n _indices = sorted([i.h() for i in _symm_equiv.indices()],\n reverse=True)\n if len(_indices) < _symm_equiv.multiplicity(False):\n _indices = _indices + [(-hh, -kk, -ll)\n for (hh, kk, ll) in _indices]\n return _indices",
"def _set_neighs_list_list(self, key):\n if self._constant_neighs:\n key = np.array(key)\n if self.staticneighs:\n self.idxs = key\n self.ks = range(1) if self.ks is None else self.ks\n else:\n self.ks = range(1) if self.ks is None else self.ks\n len_ks = len(self.ks)\n self.idxs = [key for k in range(len_ks)]\n if type(key) == np.ndarray:\n self.idxs = np.array(self.idxs)\n if len(self.iss) != len(key):\n if len(self.iss) != len(key):\n self.iss = range(len(key))\n# if len(self.idxs[0]) > 0:\n# self.iss = list(range(len(self.idxs)))\n self._setted = True",
"def print_idxlist_to_textlists(self, idx_list, worst=True, k=None, devData=None, y_pred=None, \\\n print_window=True, dataClass=None, return_indices=False): \n print (\"indices counts =\", idx_list.shape[0])\n boo = \"worst\" if worst else \"best\"\n print (\"ranked by {} cross-entropy loss\".format(boo))\n \n idx_list = [idx for (idx,ce) in self.rank_predictions(idx_selected=idx_list, worst=worst) ]\n ce_list = [ce for (idx,ce) in self.rank_predictions(idx_selected=idx_list, worst=worst) ]\n if k is not None:\n print (\"top {} results\".format(k))\n idx_list = idx_list[:k]\n ce_list = ce_list[:k] \n \n devData = (self.devX, self.devX_pos, self.devX_capitals, self.devY) if (devData is None) else devData\n y_pred = self.y_pred if (y_pred is None) else y_pred\n dataClass = self.dataClass if (dataClass is None) else dataClass\n \n word_windows = list(map(dataClass.vocab.ids_to_words, devData[0][idx_list]))\n pos_windows = list(map(dataClass.posTags.ids_to_words, devData[1][idx_list]))\n capital_windows = list(map(dataClass.capitalTags.ids_to_words, devData[2][idx_list])) \n gold_ner_class = [dataClass.nerTags.ids_to_words([tag]) for tag in devData[3][idx_list]]\n pred_ner_class = [dataClass.nerTags.ids_to_words([tag]) for tag in y_pred[idx_list]] \n\n if word_windows:\n cen = len(word_windows[0])//2 \n for i in range(len(word_windows)):\n print (\"\\nID {}\".format(idx_list[i]))\n print (\"KL divergence {}\".format(ce_list[i]))\n print (\"FEATURES: \\\"{}\\\", {}, {}\".format(word_windows[i][cen], pos_windows[i][cen], \\\n capital_windows[i][cen]))\n print (\"Gold NER {}\".format(gold_ner_class[i]))\n print (\"Pred NER {}\".format(pred_ner_class[i]))\n if print_window:\n print (\"Text window {}\".format(word_windows[i]))\n print (\"PoS window {}\".format(pos_windows[i]))\n print (\"Caps window {}\".format(capital_windows[i]))\n else:\n print (\"empty -- no predictions were made\")\n\n if return_indices:\n return idx_list",
"def _get_neighs_list_static(self, k_is=[0]):\n neighs = [self.idxs for k_i in k_is]\n return neighs",
"def build_k_indices(num_row, k_fold, seed):\n #num_row = y.shape[0]\n interval = int(num_row / k_fold)\n np.random.seed(seed)\n indices = np.random.permutation(num_row)\n k_indices = [indices[k * interval: (k + 1) * interval]\n for k in range(k_fold)]\n return np.array(k_indices)",
"def find_nearest_neighbors_idx(X, x, k):\n ## homework:start\n result = \n ## homework:end\n return result",
"def __getitem__(self, k) :\n raise NotImplementedError",
"def clustering_and_visulization(self):\n centroids, _ = kmeans(self.data_mat, self.k)\n idx, _ = vq(self.data_mat, centroids)\n for i in range(self.k):\n\n self.plot_list.append(self.data_mat[idx == i, 0])\n self.plot_list1.append(self.data_mat[idx == i, 1])\n\n for j in range(self.k):\n plot(self.plot_list[j], self.plot_list1[j], self.color_code[j])\n plot(centroids[:, 0], centroids[:, 1], 'sg', markersize=8)\n show()\n for i in range(self.k):\n self.cluster = self.data_mat[idx == i]\n self.clusterlist.append(self.cluster)\n\n for i in range(len(self.clusterlist)):\n self.clusterdict[i] = self.clusterlist[i]\n print(self.clusterdict)\n\n\n self.indexdict = {}\n for i in self.clusterdict:\n self.indexdict[i] = []\n print(len(self.clusterdict))\n for i in range(len(idx)):\n for j in range(len(self.clusterdict)):\n if (self.clusterdict[j][:] == self.data_mat[i]).any():\n self.indexdict[j].append(i)\n print(\"cluster dict of packs\",self.indexdict)\n\n self.drugdict = {}\n for i in self.clusterdict:\n self.drugdict[i] = []\n self.drug=[]\n for i in range(len(self.indexdict.keys())):\n for j in range(len(self.indexdict[i])):\n self.drugdict[i].append(self.df.iloc[self.indexdict[i][j]].to_dict())\n print(\"drugs dict with their frequencies\",self.drugdict)\n clusterdict_from_df_as_drug_non_O_frequency = {}\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs ={}\n for i in self.drugdict:\n clusterdict_from_df_as_drug_non_O_frequency[i] = []\n for i in self.drugdict:\n for j in self.drugdict[i]:\n clusterdict_from_df_as_drug_non_O_frequency[i].append({x: y for x, y in j.items() if y != 0})\n print(\"clusterdict_from_df_as_drug_non_O_frequency\", clusterdict_from_df_as_drug_non_O_frequency)\n print('\\n')\n\n for i in self.drugdict:\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in self.drugdict:\n for j in clusterdict_from_df_as_drug_non_O_frequency[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n\n for i in self.drugdict:\n clusterdict_of_non_repeated_drugs[i]=list(more_itertools.collapse([list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n\n\n print(\"only drugs only\", clusterdict_of_non_repeated_drugs)\n\n########################################################################################################################\n try:\n common_drug_list = [x for x in clusterdict_of_non_repeated_drugs[0] if x in clusterdict_of_non_repeated_drugs[1]]\n print('\\n')\n print(\"common drug list\", common_drug_list)\n total_frequency_of_drugs_dict = {}\n for i in self.drugdict:\n total_frequency_of_drugs_dict[i] = []\n\n for drug in common_drug_list:\n\n for cluster_keys in clusterdict_from_df_as_drug_non_O_frequency.keys():\n temp_list = []\n for cluster_values_as_list in clusterdict_from_df_as_drug_non_O_frequency[cluster_keys]:\n try:\n temp_list.append(cluster_values_as_list[str(drug)])\n except KeyError:\n print(\"\\t\")\n total_frequency_of_drugs_dict[cluster_keys].append(np.sum(temp_list))\n print(\"total drugs frequency\",total_frequency_of_drugs_dict)\n total_frequency_of_drugs_dict_with_drugs = {}\n for i in self.drugdict:\n total_frequency_of_drugs_dict_with_drugs[i] = []\n temp_list1 = []\n temp_list2 = []\n for keys in self.drugdict.keys():\n temp_list1.append(clusterdict_of_non_repeated_drugs[keys])\n for keys in self.drugdict.keys():\n 
temp_list2.append(total_frequency_of_drugs_dict[keys])\n temp_list3 = []\n for i in temp_list1:\n for j in temp_list2:\n temp_list3.append(dict(zip(i,j)))\n temp_list4 = temp_list3[:2]\n print('\\n')\n for keys in self.drugdict:\n total_frequency_of_drugs_dict_with_drugs[keys].append(temp_list4[keys])\n print(\"total frequency with drugs dict\",total_frequency_of_drugs_dict_with_drugs)\n\n final_drugs_in_clusters_dict = {}\n for i in self.drugdict:\n final_drugs_in_clusters_dict[i] = []\n compare_list = []\n for drug in common_drug_list:\n compare_list.append(min(total_frequency_of_drugs_dict_with_drugs[0][0][drug], total_frequency_of_drugs_dict_with_drugs[1][0][drug]))\n print(\"compare list\",compare_list)\n for values in total_frequency_of_drugs_dict_with_drugs.values():\n for key1, value1 in values[0].items():\n if value1 in compare_list:\n\n key2 =values[0].keys()[values[0].values().index(value1)]\n values[0].pop(key2, None)\n\n\n print('final dict with deleted keys', total_frequency_of_drugs_dict_with_drugs)\n\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs = {}\n\n for i in self.drugdict:\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in self.drugdict:\n for j in total_frequency_of_drugs_dict_with_drugs[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n for i in self.drugdict:\n clusterdict_of_non_repeated_drugs[i] = list(more_itertools.collapse([list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n print(\"only drugs\",clusterdict_of_non_repeated_drugs)\n\n final_robot_packs_dict = {}\n for i in self.drugdict:\n final_robot_packs_dict[i] = []\n\n winner_drug_dict = {}\n for i in common_drug_list:\n winner_drug_dict[i] = []\n for drug in common_drug_list:\n if drug in clusterdict_of_non_repeated_drugs[0]:\n winner_drug_dict[str(drug)].append(0)\n if drug in clusterdict_of_non_repeated_drugs[1]:\n winner_drug_dict[str(drug)].append(1)\n print(\"winner drug dict\",winner_drug_dict)\n\n for i in self.indexdict:\n print(i)\n for pack in self.indexdict[i]:\n packdict = self.df.iloc[pack].to_dict()\n packdict_non_0 = {x: y for x, y in packdict.items() if y != 0}\n packdict_non_0_key = packdict_non_0.keys()\n for drug in packdict_non_0_key:\n if drug in clusterdict_of_non_repeated_drugs[0]:\n final_robot_packs_dict[0].append(pack)\n elif drug in clusterdict_of_non_repeated_drugs[1]:\n final_robot_packs_dict[1].append(pack)\n\n final_robot_packs_dict[i].append(pack)\n for commondrugs in winner_drug_dict:\n for winnercluster in winner_drug_dict[commondrugs]:\n if winnercluster==0:\n loosercluster =1\n if winnercluster == 1:\n loosercluster = 0\n if commondrugs in packdict_non_0_key and i==loosercluster:\n try:\n final_robot_packs_dict[i].remove(pack)\n final_robot_packs_dict[winnercluster].append(pack)\n except ValueError:\n print('\\t')\n\n for i in self.indexdict:\n final_robot_packs_dict[i] = set(final_robot_packs_dict[i])\n\n print(\"final which pack which robot dict\",final_robot_packs_dict)\n\n except IndexError:\n print(\"No common drugs\")",
"def _set_neighs_number(self, key):\n if self.staticneighs:\n self.idxs = np.array([[key]]*len(self.iss))\n else:\n if self.ks is None:\n self.ks = range(1)\n len_ks = len(self.ks)\n self.idxs = np.array([[[key]]*len(self.iss)]*len_ks)\n self._constant_neighs = True\n self._setted = True",
"def get_sparse_knn_graph(df, k, algorithm):\n print(\"df shape\", df.shape)\n X = np.array(df)\n nbrs = NearestNeighbors(n_neighbors=k, algorithm=algorithm).fit(X)\n distances, indices = nbrs.kneighbors(X)\n knn_graph = nbrs.kneighbors_graph(X).toarray()\n print(\"indices, graph\", len(indices), knn_graph.shape)\n return indices, knn_graph",
"def build_index(dataset, n_neighbors):\n# Initialize FLANN\n pyflann.set_distance_type(distance_type='euclidean')\n flann = pyflann.FLANN()\n params = flann.build_index(dataset,algorithm='kdtree',trees=4)\n #print params\n nearest_neighbors, dists = flann.nn_index(dataset, n_neighbors, checks=params['checks'])\n return nearest_neighbors, dists",
"def tril_indices(n,k=0):\r\n return mask_indices(n,tril,k)",
"def clustering_and_visulization(self):\n try:\n centroids, _ = kmeans(self.data_mat, self.k)\n except ValueError:\n print(\"The number of clusters is more than the data points\")\n self.idx, _ = vq(self.data_mat, centroids)\n for i in range(self.k):\n\n self.plot_list.append(self.data_mat[self.idx == i, 0])\n self.plot_list1.append(self.data_mat[self.idx == i, 1])\n\n for j in range(self.k):\n plot(self.plot_list[j], self.plot_list1[j], self.color_code[j])\n plot(centroids[:, 0], centroids[:, 1], 'sg', markersize=8)\n show()\n\n for i in range(self.k):\n self.cluster = self.data_mat[self.idx == i]\n self.clusterlist.append(self.cluster)\n print(self.clusterlist)\n for i in range(len(self.clusterlist)):\n self.clusterdict[i] = self.clusterlist[i]\n print(self.clusterdict)\n\n index_dict ={}\n for i in self.clusterdict:\n index_dict[i] = []\n for i in range(len(self.data_mat)):\n for j in range(len(self.clusterdict)):\n if (self.clusterdict[j][:] == self.data_mat[i]).any():\n index_dict[j].append(i)\n print(\"drugs cluster dict\", index_dict)\n\n self.drugsdict = {}\n for i in index_dict:\n self.drugsdict[i] = []\n drugslist = list(self.df.columns.values)\n print(\"drugs list from dataframe\", drugslist)\n\n for i in index_dict:\n self.drugsdict[i] = [drugslist[index] for index in index_dict[i]]\n\n print(\"drugs cluster dict\", self.drugsdict)\n########################################################################################################################\n clusterdict_from_df_as_drug_frequency = {}\n clusterdict_from_df_as_drug_non_O_frequency = {}\n\n print('\\n')\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_frequency[i] = []\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_frequency[i].append(self.df.iloc[i].to_dict()) #\n print(\"packs in dict form of drugs frequency\", clusterdict_from_df_as_drug_frequency)\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_non_O_frequency[i] = []\n\n for i in range(len(self.df)):\n for j in clusterdict_from_df_as_drug_frequency[i]:\n clusterdict_from_df_as_drug_non_O_frequency[i].append({x: y for x, y in j.items() if y != 0})\n print(\"clusterdict_from_df_as_drug_non_O_frequency\", clusterdict_from_df_as_drug_non_O_frequency)\n print('\\n')\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs = {}\n for i in range(len(self.df)):\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in range(len(self.df)):\n for j in clusterdict_from_df_as_drug_non_O_frequency[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n for i in range(len(self.df)):\n clusterdict_of_non_repeated_drugs[i] = list(more_itertools.collapse(\n [list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n\n print(\"only drugs only\", clusterdict_of_non_repeated_drugs)\n\n########################################################################################################################\n robot_for_packs_dict = {}\n for i in range(len(self.df)):\n robot_for_packs_dict[i] = []\n\n # for i in range(len(self.df)):\n for i in range(len(self.df)):\n for j in clusterdict_of_non_repeated_drugs[i]:\n if j in self.drugsdict[0]:\n robot_for_packs_dict[i].append(0)\n elif j in self.drugsdict[1]:\n robot_for_packs_dict[i].append(1)\n for i in range(len(self.df)):\n robot_for_packs_dict[i] = set(robot_for_packs_dict[i])\n\n for i in range(len(self.df)):\n 
robot_for_packs_dict[i] = list(more_itertools.collapse(robot_for_packs_dict[i]))\n print('\\n')\n print(\"clusterdict_of_non_repeated_drugs\", robot_for_packs_dict)",
"def tree_idx2(treeG,k1,J1,J2):\n g = treeG[J1]['clusters'][k1]\n if(J1>J2+1):\n for j in np.arange(J2+1, J1)[::-1]:\n g1 = []\n for i in np.arange(0,len(g),1):\n g1 = np.array(np.append(g1,treeG[j]['clusters'][g[i]]), dtype = int)\n g = g1\n y = g\n return y",
"def hierarchial_clustering(self,k):\r\n\r\n print(colored(\"Performing hierarchial clustering\",color = 'yellow', attrs=['bold']))\r\n self.clustering = AgglomerativeClustering(affinity='euclidean', linkage='ward').fit(self.X)\r\n self.labels = self.clustering.labels_\r\n self.davies_bouldin_score()\r\n print()\r\n print(colored(\"The number of cluster centers formed are %d\\n\" %(self.clustering.n_clusters_),color = 'red', attrs=['bold']))\r\n self.cluster_plot()\r\n return self.labels",
"def _set_neighs_array_lvl2(self, key):\n sh = key.shape\n ## If only iss and neighs\n self.idxs = key\n if self.staticneighs:\n self.idxs = np.array(key)\n else:\n len_ks = len(self.ks) if self.ks is not None else 1\n self.ks = range(1) if self.ks is None else self.ks\n self.idxs = np.array([key for k in range(len_ks)])\n self._setted = True\n if sh[0] != len(self.iss):\n self.iss = list(range(sh[0]))",
"def build(self,neighborhoods,k=5):\n g_idx = np.zeros(len(neighborhoods),dtype=np.int)\n for i, nn in enumerate(neighborhoods):\n G = Graph(nn,k)\n g_idx[i] = self.encounter(G)\n for i, sig in enumerate(self.sigs):\n if sig not in self.lookup:\n self.lookup[sig] = np.array([],dtype=np.int)\n self.lookup[sig] = np.hstack((self.lookup[sig],np.argwhere(g_idx==self.index[sig]).flatten()))",
"def rebuild_indexes(self):\n self.cards = sorted(self.name_to_card.values(), key=lambda card: card.name)\n self.card_sets = sorted(\n self.code_to_card_set.values(), key=lambda cset: cset.release_date\n )\n\n self.set_code_to_printings = collections.defaultdict(list)\n self.card_name_to_printings = collections.defaultdict(list)\n self.set_name_num_mv_to_printings = collections.defaultdict(list)\n\n for printing in self.id_to_printing.values():\n self.set_code_to_printings[printing.set_code].append(printing)\n self.card_name_to_printings[printing.card_name].append(printing)\n # snnm == (set, name, number, multiverseid)\n snnm_index_keys = {\n # pylint: disable=line-too-long\n (\n printing.set_code,\n printing.card_name,\n printing.set_number,\n printing.multiverseid,\n ),\n (printing.set_code, printing.card_name, None, printing.multiverseid),\n (printing.set_code, printing.card_name, printing.set_number, None),\n (printing.set_code, printing.card_name, None, None),\n }\n for key in snnm_index_keys:\n self.set_name_num_mv_to_printings[key].append(printing)\n\n for printings in self.set_code_to_printings.values():\n printings.sort(key=set_code_to_printings_key)\n\n for printings in self.card_name_to_printings.values():\n printings.sort(key=card_name_to_printing_key)\n\n # Build ordered indexes\n self.set_code_to_printing_to_row = {}\n for set_code, printings in self.set_code_to_printings.items():\n self.set_code_to_printing_to_row[set_code] = {\n printing: i for i, printing in enumerate(printings)\n }",
"def ssk_from_indices( indices_l, indices_r ):\n return mat[ [[int(il)] for il in indices_l], [int(ir) for ir in indices_r] ]",
"def __getitem__(self,k):\n if type(k) is IntType: return self.data[k, 0]\n \n vec = [type(x) is SliceType for x in k]\n \n if True in vec: #suppose only one slice\n ii=vec.index(True)\n indices=[]\n k = list(k)\n import numpy\n rep = numpy.zeros((self.dims[ii],), 'd')\n for i in range(self.dims[ii]):\n k[ii] = i\n rep[i] = self.data[self.comp(k), 0]\n return rep\n else:\n return self.data[self.comp(k), 0]",
"def _index_q_list_in_k_list(q_list, k_list):\r\n q_list_length = len(q_list)\r\n k_list_length = len(k_list)\r\n for idx in range(k_list_length - q_list_length + 1):\r\n t = [q == k for q, k in zip(q_list, k_list[idx: idx + q_list_length])]\r\n # print(idx, t)\r\n if all(t):\r\n # print(idx)\r\n idx_start = idx\r\n return idx_start"
] |
[
"0.5962983",
"0.57616395",
"0.57209855",
"0.56889814",
"0.567445",
"0.5651179",
"0.5565496",
"0.55339205",
"0.5526475",
"0.5519671",
"0.55177283",
"0.55137473",
"0.54557353",
"0.54334563",
"0.5427422",
"0.54150856",
"0.5399065",
"0.5382154",
"0.53808355",
"0.5375292",
"0.5372239",
"0.53716",
"0.5364934",
"0.53506434",
"0.5349942",
"0.534587",
"0.5337015",
"0.5327365",
"0.5323884",
"0.5304408"
] |
0.57878196
|
1
|
Add a new hotel to the system
|
async def add_hotel_endpoint(request):
hotel_name = request.args["hotel_name"][0]
hotel_id = model.add_hotel(hotel_name)
return json({"hotel_id": hotel_id})
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def add_new_arrival(self):\n pass",
"def addHotspot( self, hotspot ):\n self._hotspots.append(hotspot)",
"async def add_reservation_endpoint(request):\n hotel_id = request.args[\"hotel_id\"][0]\n room_type = request.args[\"room_type\"][0]\n arrival_date = request.args[\"arrival_date\"][0]\n departure_date = request.args[\"departure_date\"][0]\n status = request.args[\"status\"][0]\n reservation_id = model.add_reservation(hotel_id, room_type, arrival_date, departure_date, status)\n if reservation_id == model.OPERATION_ERROR_RETURN_CODE:\n return json({\"success\": False})\n return json({\"success\": True, \"reservation_id\": reservation_id})",
"async def add_inventory_endpoint(request):\n hotel_id = request.args[\"hotel_id\"][0]\n room_type = request.args[\"room_type\"][0]\n room_inventory = request.args[\"room_inventory\"][0]\n model.add_inventory(hotel_id, room_type, room_inventory)\n return json({\"success\": True})",
"def add_food(self, _food):\n self.food.append(_food)",
"def add_room(self, data):\n room_id = data['room_id']\n x, y = literal_eval(data['coordinates'])\n room_data = {'id': data['room_id'],\n 'title': data['title'],\n 'description' : data['description'],\n 'coordinates': literal_eval(data['coordinates']),\n 'elevation': data['elevation'],\n 'terrain': data['terrain'],\n 'exits' : {direction: '?' for direction in data['exits']}\n }\n self.rooms.setdefault(room_id, room_data)",
"def add_registry(self) -> None:\n\n # inits functions corresponding to user input and takes in url input\n item_options = {'n': self.inp_item_price, 'y': self.inp_book_prices}\n url = str(input(\"Enter URL to amazon item: \"))\n # validates url input - prevents inputting duplicate and/or blank URLs\n if(url == \"\" or url in self.load_links()[1]):\n print(\"Item not added - URL already exists or is blank\")\n return\n # user-input price(s) -> then -> validates price input \n prices = item_options.get(self.input_item_category())()\n try:\n for price in prices:\n float(price)\n except ValueError:\n print(\"Do not include any letters or symbols other than '.' - Item not added!\")\n return\n # writes input as a line of text to text file\n with open(URL_FILE, 'a') as text_file:\n text_file.write(self.format_string(url, prices))\n pass",
"def add_station(self, station_id=None, time=None, location=None):",
"def insert(self, name, address, city, state, zipcode, hour, phone, rating, image):\r\n pass",
"def restaurants_new():\n # If the user isn't logged in, send to the login page\n if helper.handle_login(login_session) is False:\n return redirect('/login')\n\n if request.method == 'POST':\n if len(request.form['name']) > 0:\n new_restaurant = Restaurant(name=request.form['name'],\n address=request.form['address'],\n phone=request.form['phone'],\n web=helper.check_restaurant_URL(request.form['web']),\n description=request.form['description'],\n user_id=login_session['user_id'])\n session.add(new_restaurant)\n session.commit()\n flash(\"New restaurant created - {}\".format(new_restaurant.name))\n tag_line = request.form['tag_line']\n tag_list = tag_line.split(',')\n for tag in tag_list:\n helper.add_tag_if_not_exists(tag, new_restaurant.id)\n return redirect(url_for('restaurants_page'))\n else:\n flash(\"Incorrect Restaurant details - Please include a name!\")\n\n user_info = helper.get_user_if_exists(login_session)\n return render_template('newrestaurant.html', user_info=user_info)",
"def add_location(self, **kwargs):\n \n self.options.update(kwargs)\n self.options['action'] = 'locator.location.add'\n return self.call(self.options)",
"def newEquipment(recipe):\r\n db = db_helpers.getDbCon()\r\n cursor = db.cursor()\r\n equipmentInsertQuery = \"\"\"INSERT into equipment (equipment_id, equipment_name) \r\n VALUES (%s, %s) ON Duplicate KEY UPDATE equipment_id = equipment_id;\"\"\"\r\n try:\r\n for instr in recipe.instructions:\r\n for equip in instr.equipment:\r\n cursor.execute(equipmentInsertQuery, (equip.equipment_id, equip.equipment_name))\r\n db.commit()\r\n except Exception:\r\n print(\"Error: OOPs something went wrong while adding new equipment to the database\")\r\n finally:\r\n cursor.close()\r\n db.close()",
"def create_hotels_list(self):\n\n hotels = load.loader.get_all_hotels()\n\n self.clear_widgets()\n\n for hotel in hotels:\n btn = HotelButton(text=hotel.name)\n self.add_widget(btn)\n btn.bind(on_release=lambda bt: self.select(bt.text))",
"def register_restaurant(self, id, location, meals_list):\r\n r = Restaurant(id)\r\n r.set_location(location)\r\n r.set_meals_offered_list(meals_list)\r\n self._restaurants_list.append(r)",
"def add_flight(self, flight: Flight):\n self.flights.append(flight)",
"def test_create_hotel(self):\n amsterdam = City.objects.get(name=\"Amsterdam\")\n ibis = Hotel.objects.get(name=\"Ibis\")\n\n self.assertEqual(ibis.city, amsterdam)\n self.assertEqual(ibis.code, \"AMS01\")\n self.assertEqual(ibis.name, \"Ibis\")",
"def _create_fleet(self):\n # make an alien\n alien = Alien(self)\n self.aliens.add(alien)",
"def add_visit():\n\n # checks to see if user is logged in\n\n if session.get('username'):\n username = session['username']\n user = User.query.filter_by(username=username).first()\n\n # finds the friend searched for on the database\n friend = request.args.get(\"friend\")\n friend_user = User.query.filter_by(username=friend).first()\n\n when = request.args.get(\"when\")\n user_rating = Decimal(request.args.get(\"rating\"))\n\n # finds the restaurant's ID, adds the restaurant to the database if not in yet\n restaurant = request.args.get(\"name\")\n yelp_id = request.args.get(\"id\")\n avg_rating = request.args.get(\"avg_rating\")\n price_lvl = request.args.get(\"price\")\n review_count = request.args.get(\"rc\")\n categs = request.args.get(\"categs\")\n list_categs = categs.split(\",\")\n\n if not Restaurant.query.filter_by(name=restaurant).all():\n new_restaurant = Restaurant(yelp_id=yelp_id,\n name=restaurant,\n rating=avg_rating,\n price=turn_to_nums(price_lvl),\n review_count=review_count)\n db.session.add(new_restaurant)\n db.session.commit()\n\n rest_id = db.session.query(Restaurant.id).filter_by(yelp_id=yelp_id).first()[0]\n if not Category.query.filter_by(rest_id=rest_id).all():\n if len(list_categs) == 3:\n categ1, categ2, categ3 = list_categs\n elif len(list_categs) == 2:\n categ1, categ2 = list_categs\n categ3 = None\n else:\n categ1 = list_categs\n categ2 = None\n categ3 = None\n new_categs = Category(rest_id=rest_id,\n categ1=categ1,\n categ2=categ2,\n categ3=categ3)\n db.session.add(new_categs)\n db.session.commit()\n\n # Adding to the visits and uservisits tables\n new_visit = Visit(rest_id=rest_id, date=when)\n db.session.add(new_visit)\n db.session.commit()\n new_visit_id = db.session.query(Visit.id).filter_by(rest_id=rest_id,\n date=when).order_by(Visit.date.desc()).first()[0]\n new_visit_exp = UserExp(visit_id=new_visit_id,\n user_id=user.id,\n rating=user_rating)\n f_new_visit_exp = UserExp(visit_id=new_visit_id,\n user_id=friend_user.id)\n db.session.add(new_visit_exp)\n db.session.add(f_new_visit_exp)\n db.session.commit()\n return \" <span class='label label-success'>Saved!</span>\"\n\n # if not logged in, cannot save\n else:\n return \" <a href='/login'><span class='label label-default'>Login to save</span></a>\"",
"def add(self, offer):\n other_offer = self.get(offer.get_price(), offer.get_way())\n if other_offer:\n other_offer.add_quote_amount(offer.get_quote_amount())\n other_offer.add_base_amount(offer.get_base_amount())\n return\n self.book[offer.get_way()].append(offer)\n self.book[offer.get_way()] = sorted(self.book[offer.get_way()], key=lambda entry: entry.get_price(),\n reverse=(offer.get_way() == Trade.WAY_BUY))",
"def post(self, request, _format=None): # pylint: disable=unused-argument, no-self-use\n # Assign hotel_booking variables\n if '_TRANSACTION_TYPE_' in request.data['slots']:\n transaction_type = request.data['slots']['_TRANSACTION_TYPE_']['candidates'][0]['tokens']\n else:\n transaction_type = None\n\n if '_LOCATION_' in request.data['slots']:\n location = request.data['slots']['_LOCATION_']['candidates'][0]['tokens']\n else:\n location = None\n\n if '_PRICE_' in request.data['slots']:\n price = request.data['slots']['_PRICE_']['candidates'][0]['tokens']\n else:\n price = None\n\n # this loop sets all of the _SLOTS_ to have a `\"resovled\": 1` so they will be kept\n # through each turn of the conversation. Currently, each turn the slots are sent\n # with a `\"resolved\": -1`, so they need to be reset each time, however, they are\n # changing to be persistent based on their resolved status in an update coming soon\n for (slot, slot_data) in request.data['slots'].iteritems():\n if 'candidates' in request.data['slots'][slot]:\n for candidate in range(len(slot_data['candidates'])):\n request.data['slots'][slot]['candidates'][candidate]['resolved'] = 1\n if slot != '_DATE_':\n request.data['slots'][slot]['candidates'][candidate]['value'] = \\\n request.data['slots'][slot]['candidates'][candidate]['tokens']\n else:\n request.data['slots'][slot]['resolved'] = 1\n\n #magical API call to check their credit\n available_credit = check_available_credit()\n\n # state transition example\n # if someone does not have enough credit available to pay for the hotel, \n # redirect them to a credit_card_offer state, and return the payload\n if available_credit < price:\n request.data['state'] = 'credit_card_offer'\n return Response(request.data)\n\n if transaction_type == 'express deal':\n if location and price:\n # This is our magical API call to find express deals\n hotel = find_express_deal(location, price)\n if hotel:\n # This is how to add new _SLOTS_ to the business logic json\n hotel_rating = {\n \"candidates\": [\n {\n \"resolved\": 1,\n \"value\": hotel['hotel_rating']\n }\n ],\n \"required_matches\": \"EQ 1\",\n \"type\": \"string\"\n }\n request.data['slots']['_HOTEL_RATING_'] = hotel_rating\n hotel_type = {\n \"candidates\": [\n {\n \"resolved\": 1,\n \"value\": hotel['hotel_type']\n }\n ],\n \"required_matches\": \"EQ 1\",\n \"type\": \"string\"\n }\n request.data['slots']['_HOTEL_TYPE_'] = hotel_type\n\n # return the modified business logic payload\n return Response(request.data)",
"def add_feature(request):\n\n r = {}\n if request.POST.get('code','000') == 'ch00seW199Er':\n # pick a random location\n featured_already = Featured.objects.all().values('location')\n locations = Location.objects.exclude(id=1).exclude(id__in=featured_already).exclude(name__iregex=r'[\\w# ]+(wash|washer|dryer|dyer)[\\w# ]*').filter(type=Location.EATERY)\n features = sample(locations, 10)\n i = randint(0,9)\n selected = features[i]\n tomorrow = date.today()+timedelta(1)\n \n f = Featured(location=selected, \n day=tomorrow,\n description=\"50 cents off if you transact here today\",\n amount=0.5,\n expires=datetime(tomorrow.year, tomorrow.month, tomorrow.day, 13,59))\n f.save() \n r['result'] = {'location': selected.name, 'loc_id': selected.id}\n else:\n r['result'] = '-1'\n return JSONHttpResponse(r)",
"def add_location(db_path: str, location: Location) -> None:\n query = f'INSERT INTO locations (name, area, climate) VALUES (\"{location.name}\", {location.area}, {location.climate.climate_type})'\n\n conn: Connection = sqlite3.connect(path.join(db_path, 'company_data.db'))\n curr: Cursor = conn.cursor()\n try:\n curr.execute(query)\n except sqlite3.IntegrityError:\n raise ValueError(\"Error, tray already exists in database.\")\n\n conn.commit()\n curr.close()\n conn.close()",
"def addBooking(self, booking):\n self.bookings.addBooking(booking.getID())",
"def add_new_oneoff():\n ClientID = request.form['ClientID']\n oneoff_name = request.form['oneoff_name']\n charge = int(float(request.form['charge']) * 100)\n period = request.form['period']\n time = int(float(request.form['time']) * 100)\n workdate = request.form['workdate']\n \n OneOff.insert(oneoff_name, ClientID, charge, time, period, workdate)\n\n return redirect(url_for('all_jobs_for_client', ClientID=ClientID))",
"def create_office(self, data):\n return self.client.post(\n path='/api/v1/offices/', data=json.dumps(data), content_type='application/json')",
"def add(self):\n pass",
"def add_place(name, country, city, street):\n place = Place(name=name, country=country, city=city, street=street)\n session.add(place)\n session.commit()",
"def handle_add(self, controller):\n \n try:\n pizza = controller.customer.pizza ## get a reference to pizza object of the customer\n \n except Exception:\n showinfo(title='Pop-up', message=\"No Pizza Created Yet.\")\n return\n \n else:\n # create an order if not exist, and add pizza to order\n c = controller.customer\n self.onPress(c) ## update requested data\n if not c.my_order:\n c.my_order = Order(c.name, c.address, c.id)\n \n c.AddToOrder()\n controller.show_frame(PageTwo) ## go to my order page",
"def _addSite(self,site):\n self.sites.append(site)",
"def add_location():\n\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n \n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n \n form = Location_Form()\n\n if form.validate_on_submit():\n try:\n location = Location(\n site_name = form.site_name.data,\n city = form.city.data,\n state = form.state.data\n )\n db.session.add(location)\n db.session.commit()\n except IntegrityError:\n flash(\"This location already exists\", \"danger\")\n return render_template(\"/admin/add_location.html\", form = form)\n \n flash(\"Location Added!\", \"success\")\n return redirect(\"/administrator\")\n else:\n return render_template(\"/admin/add_location.html\", form = form)"
] |
[
"0.63080275",
"0.5862244",
"0.58347523",
"0.5819879",
"0.5525398",
"0.5483701",
"0.54169357",
"0.5386112",
"0.5373534",
"0.52948624",
"0.52928376",
"0.5286119",
"0.5283983",
"0.5268902",
"0.5244285",
"0.52413",
"0.5239886",
"0.52386993",
"0.5225297",
"0.5223356",
"0.52185214",
"0.51923454",
"0.51849073",
"0.51708066",
"0.5161593",
"0.51608807",
"0.5150096",
"0.5136178",
"0.5118596",
"0.51046026"
] |
0.67610687
|
0
|
Add inventory to a given hotel
|
async def add_inventory_endpoint(request):
hotel_id = request.args["hotel_id"][0]
room_type = request.args["room_type"][0]
room_inventory = request.args["room_inventory"][0]
model.add_inventory(hotel_id, room_type, room_inventory)
return json({"success": True})
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def add_to_inventory(self, newItem):\n\n if len(self.player_inventory) >= 8:\n print(\"\"\"You already have the maximum of 7 items in your inventory,\n looks like you will need to get rid of an item to get {}\"\"\".format(newItem.name))\n\n print(\"Would you like to get rid of an item to add the {} to your inventory?\".format(newItem.name))\n\n if 'yes' in choice:\n dropping = player_inventory.drop()\n print(dedent('Okay, {} was removed from your inventory.'.format(item_name)))\n\n elif 'no' in choice:\n print(dedent('Okay redirecting you back to shop.'))\n return False\n\n else:\n print(dedent('Seems like you did not make a valid choice, aborting ...'))\n return False\n\n else:\n\n if newItem.type == \"food\":\n self.player_inventory[newItem.name] = newItem.health_addition\n elif newItem.type == \"weapon\":\n self.player_inventory[newItem.name] = newItem.quality\n\n print(dedent(\"\"\"\n ##############################################\n Nice, the {} has been added to your inventory!\n \"\"\".format(newItem.name)))",
"def add_to_inventory(self, item):\n\t\tif item in self.inventory:\n\t\t\tself.inventory[item] += 1\n\t\telse:\n\t\t\tself.inventory[item] = 1",
"def load_inventory(self):\n for item in self.items:\n self.rooms[int(item.initial_room_id) - 1].inventory.add(item)",
"def inventory_add(self, item):\n if (len(self.ItemList) >= self.InventorySize):\n # Inventory full\n return 2\n self.ItemList.append(item)\n return 0",
"def addToInventory(modList, item):\r\n modList.append(item)",
"def inventoryAdd(obj):\n size=1\n if obj==\"TSA Trophy\":\n size =2\n print(\"The TSA Trophy takes two hands to pick up.\")\n if len(inventory)+size>2:\n print(\"Your hands are too full to pick up\",obj+\".\")\n else:\n print(\"You picked up\",obj)\n inventory.append(obj)\n inventoryCall()",
"def add_to_inventory(self, item_to_add_to_inventory):\n raise NotImplementedError(\"Subclasses define what adding to the inventory entails\")",
"def add_appliance(itemcode, description, marketprice, rentalprice):\n\n itembrand = input(\"Enter item brand: \")\n itemvoltage = input(\"Enter item voltage: \")\n newitem = ElectricAppliances \\\n (itemcode, description, marketprice, rentalprice,\n itembrand, itemvoltage)\n\n FULLINVENTORY[itemcode] = newitem.returnasdictionary()\n print(\"New inventory item added\")",
"def add_to_inventory(item, location, quantity, user=None):\n\n try:\n inventory = Inventory.objects.get(item=item, location=location)\n inventory.quantity += quantity\n inventory.save()\n except ObjectDoesNotExist:\n inventory = Inventory.objects.create(item=item, location=location, quantity=quantity)\n\n transaction = InventoryTransaction.objects.create(inventory=inventory, quantity=quantity, user=user)\n\n return transaction",
"def add_inventory(cd_instance, lst_Inventory):\r\n \r\n lst_Inventory.append(cd_instance) \r\n return lst_Inventory",
"def add_item(self,itm,qty=1):\n inv = self.get_inventory()\n s = str(itm)\n inv[s] = inv.get(s, 0) + qty\n self.put_inventory(inv)",
"def addEquipmenttoRecipe(recipe):\r\n db = db_helpers.getDbCon()\r\n cursor = db.cursor()\r\n recipe_instruction_id = instruction_helpers.getRecipeInstructionID(recipe)\r\n recipeEquipmentInsertQuery = \"\"\"INSERT into recipe_equipment (recipe_instruction_id, equipment_id) VALUES (%s, %s)\"\"\"\r\n try:\r\n for ind, instr in enumerate(recipe.instructions):\r\n for equip in instr.equipment:\r\n cursor.execute(recipeEquipmentInsertQuery, (recipe_instruction_id[ind], equip.equipment_id))\r\n db.commit()\r\n except Exception:\r\n print('Error: OOPs something went wrong while adding Equipment to a Recipe!')\r\n finally:\r\n cursor.close()\r\n db.close()",
"def add_newInventory(id, title, artist, table):\r\n dicRow = {'ID': id, 'Title': title, 'Artist': artist}\r\n table.append(dicRow)",
"def put_in(self, item):\n try:\n self.bag_of_holding.append(item)\n print(\"You have added {} to your inventory.\".format(item))\n except:\n print('Error in Inventory method: put_in')",
"def _add_to_inv(self, block_):\n if block_ in self._inventory:\n self._inventory[block_] += 1\n else:\n self._inventory[block_] = 1",
"def add_ingredient_to_shop_list (self, ingredient) :\n found = False\n qty_available = self.quantity_in_fridge (ingredient)\n for ing in self.shop_list :\n if ing.equals(ingredient) :\n qty_needed = ingredient.quantity - qty_available\n ing.add_quantity (qty_needed)\n found = True\n if found == False :\n ingredient.set_quantity(ingredient.quantity - qty_available)\n self.shop_list.append(ingredient)",
"def additemtoinventory(item):\n global ITEM_COUNT\n for i in range(0, 10): # first 10 items are weapons, (this code sux, need a better way of doing this)\n if ITEMTYPES[ITEM_LIST[ZERO_BASE_PLYR_POS]] == ITEMTYPES[i]: \n cur_weapon_strength = WEAPON_STRENGTHS[ITEMS[0]]\n new_weapon_strength = WEAPON_STRENGTHS[ITEMTYPES[i]]\n if new_weapon_strength > cur_weapon_strength:\n change_weapon(ITEMTYPES[i])\n ITEMS[0] = ITEMTYPES[i] # 'overwrite' the main weapon with the new one\n remove_item_from_map()\n return # exit here if item is weapon\n else:\n remove_item_from_map()\n return # remove the inferior weapon from the map and return\n ITEMS.append(ITEMTYPES[item])\n ITEM_COUNT = len(ITEMS)\n remove_item_from_map()",
"def add_item_to_inventory(game, *args):\n (item, action_description, already_done_description) = args[0]\n if not game.is_in_inventory(item):\n print_bold(action_description)\n game.add_to_inventory(item)\n print_italic(\"You've just got a {item}.\".format(item=item.name))\n else:\n print_italic(already_done_description)\n return False",
"def add_to_inventory(self, item, quantity):\n\t\tincreaseQuantity = None\n\t\taddToDict = True\n\t\tfor key in self.inventoryDictionary:\n\t\t\tif key.name == item.name:\n\t\t\t\taddToDict = False\n\t\t\t\tincreaseQuantity = key\n\t\t\t\tbreak\n\t\t\t\t\n\n\t\t\telse:\n\t\t\t\taddToDict = True\n\t\t\t\t\n\n\t\tif addToDict:\n\t\t\tself.inventoryDictionary[item] = quantity\n\t\telse:\n\t\t\tself.inventoryDictionary[increaseQuantity] += quantity",
"def add_food_to_bag(self):\n self.food_eaten.set(sum([species.food.get() for species in self.ecosystem]))",
"def fill_ingredient(self, ingredient: str, quantity: int) -> None:\n self.inventory_availability[ingredient] = quantity",
"def test_add_item(self):\n self.inv.add_item(self.item_helmet)\n str_inventory = self.inv.pretty\n str_item = self.item_helmet.pretty\n\n self.rebuild_instance()\n str_unequipped = self.inv.unequipped[0].pretty\n\n assert str_inventory == self.inv.pretty\n assert str_item == str_unequipped",
"def add_food(self, _food):\n self.food.append(_food)",
"def add_item(item):\n # Check first if the item already exists in the inventory\n for i in get_inventory():\n if i['name'] == item['name']:\n print(f\"[ERROR] item with name {i['name']} already exists\")\n break\n else:\n print(f'[INFO] Adding item {item}')\n INVENTORY.append(item)\n # mongo.collection().insert_one(item)",
"def inventory(self, inventory):\n\n self._inventory = inventory",
"def add_inventory_group(self, key):\n host_dict = {'hosts': [], 'vars': {}}\n self.inventory[key] = host_dict\n return",
"def getitem(self):\n self.inventory += 1",
"def add_new_item():\n #global FULL_INVENTORY\n item_code = get_input(\"Enter item code: \")\n item_desc = get_input(\"Enter item description: \")\n item_rental_price = get_input(\"Enter item rental price: \")\n\n # Get price from the market prices module\n item_price = market_prices.get_latest_price(item_code)\n new_inventory_item = inventory_class.Inventory(item_code, item_desc,\n item_price, item_rental_price)\n is_furniture = get_input(\"Is this item a piece of furniture? (Y/N): \")\n if is_furniture.lower() == \"y\":\n item_material = get_input(\"Enter item material: \")\n item_size = get_input(\"Enter item size (S,M,L,XL): \")\n new_item = furniture_class.Furniture(new_inventory_item, item_material, item_size)\n else:\n is_electrical_appliance = get_input(\"Is this item an electric appliance? (Y/N): \")\n if is_electrical_appliance.lower() == \"y\":\n item_brand = get_input(\"Enter item brand: \")\n item_voltage = get_input(\"Enter item voltage: \")\n new_item = elec_appliances_class.ElecAppliances(new_inventory_item,\n item_brand, item_voltage)\n else:\n new_item = new_inventory_item\n FULL_INVENTORY[item_code] = new_item.return_as_dictionary()\n print(\"New inventory item added\")\n return new_item.return_as_dictionary",
"def newEquipment(recipe):\r\n db = db_helpers.getDbCon()\r\n cursor = db.cursor()\r\n equipmentInsertQuery = \"\"\"INSERT into equipment (equipment_id, equipment_name) \r\n VALUES (%s, %s) ON Duplicate KEY UPDATE equipment_id = equipment_id;\"\"\"\r\n try:\r\n for instr in recipe.instructions:\r\n for equip in instr.equipment:\r\n cursor.execute(equipmentInsertQuery, (equip.equipment_id, equip.equipment_name))\r\n db.commit()\r\n except Exception:\r\n print(\"Error: OOPs something went wrong while adding new equipment to the database\")\r\n finally:\r\n cursor.close()\r\n db.close()",
"def add_entry(barcode: str, location: str):\n # Log the current date and time and append row to inventory sheet\n row = utils.datetime_array() + [barcode, location]\n sheet.append_row(row)"
] |
[
"0.6268694",
"0.6268613",
"0.6260081",
"0.6259757",
"0.6220235",
"0.62095225",
"0.6174858",
"0.6051948",
"0.6041582",
"0.6013446",
"0.5951593",
"0.59234303",
"0.5922092",
"0.5921586",
"0.588076",
"0.5787859",
"0.5782276",
"0.576559",
"0.5759909",
"0.5720281",
"0.56820464",
"0.56791717",
"0.5660806",
"0.56493336",
"0.5630271",
"0.56248707",
"0.5614866",
"0.56053007",
"0.5572203",
"0.55680066"
] |
0.6929253
|
0
|
Cancel an existing reservation
|
async def cancel_reservation_endpoint(request):
reservation_id = request.args["reservation_id"][0]
model.cancel_reservation(reservation_id)
return json({"success": True})
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def cancel_reservation(payload, clothes_id):\n selection = Reserve.query.filter_by(clothes_id=clothes_id).all()\n # if the given clothes has not been reserved, abort 404\n if len(selection) == 0:\n abort(404)\n # if two or more user reserved the same clothe, abort umprocessable\n if len(selection) >= 2:\n abort(422)\n # check if access user_id matches reservation user_id\n reservation = selection[0]\n # querying who is accessing and check role\n access_user = User.query.filter_by(auth0_id=payload['sub']).first()\n role = access_user.role\n # if user role is \"user\", check if access user_id matches\n # reservation user_id\n reservation_user = reservation.user\n if role == 'user' and access_user.id != reservation_user.id:\n raise AuthError({\n 'code': 'Invalid_claims',\n 'description': 'Unauthorized access by user'\n }, 401)\n\n # query clothes\n clothes = reservation.clothes\n\n # set error status\n error = False\n # cancel that reservation\n try:\n clothes.status = \"\"\n reservation.delete()\n formatted_clothes = clothes.format()\n formatted_user = reservation_user.format()\n except Exception:\n reservation.rollback()\n error = True\n print(sys.exc_info())\n finally:\n reservation.close_session()\n clothes.close_session()\n\n if error:\n abort(422)\n else:\n return jsonify({\n 'success': True,\n 'clothes': formatted_clothes,\n 'user': formatted_user\n })",
"def cancel_room():\n try:\n user = User.get_user()\n except ValueError as err:\n return jsonify({\"error\": str(err)})\n\n booking_id = request.form.get(\"booking_id\")\n if not booking_id:\n return jsonify({\"error\": \"No booking id sent to server!\"})\n if \",\" in booking_id:\n return jsonify({\"error\": \"Only one booking may be cancelled at a time.\"})\n\n booking = StudySpacesBooking.query.filter_by(booking_id=booking_id).first()\n if booking:\n if (booking.user is not None) and (booking.user != user.id):\n return jsonify({\"error\": \"Unauthorized: This reservation was booked by someone else.\"}), 400\n if booking.is_cancelled:\n return jsonify({\"error\": \"This reservation has already been cancelled.\"}), 400\n\n if booking_id.isdigit():\n sessionid = request.form.get(\"sessionid\")\n if not sessionid:\n return jsonify({\"error\": \"No session id sent to server.\"}), 400\n try:\n wharton.delete_booking(sessionid, booking_id)\n save_wharton_sessionid()\n if booking:\n booking.is_cancelled = True\n sqldb.session.commit()\n else:\n save_booking(\n lid=1,\n email=user.email,\n booking_id=booking_id,\n is_cancelled=True,\n user=user.id\n )\n return jsonify({'result': [{\"booking_id\": booking_id, \"cancelled\": True}]})\n except APIError as e:\n return jsonify({\"error\": str(e)}), 400\n else:\n resp = studyspaces.cancel_room(booking_id)\n if \"error\" not in resp:\n if booking:\n booking.is_cancelled = True\n sqldb.session.commit()\n else:\n save_booking(\n email=user.email,\n booking_id=booking_id,\n is_cancelled=True,\n user=user.id\n )\n return jsonify({'result': resp})",
"def cancel():",
"def cancel(self):\r\n self.require_item()\r\n\r\n url = '{0}/cancel'.format(self.get_url())\r\n request = http.Request('PUT', url)\r\n request.use_xml = False\r\n\r\n return request, parsers.parse_empty",
"def cancel(self):",
"def cancel(self):",
"def cancel(self):",
"def cancel(self):\n self.session.rollback()",
"def cancel_a_parcel(id):\n query = \"\"\"UPDATE parcels SET status = %s WHERE id = %s\"\"\"\n tuple =('canceled' , id)\n db.insert(query, tuple)",
"def cancelbooking():\n # check of user is loggedin\n if 'loggedin' in session:\n if request.method == 'POST':\n bookingid = request.form['bookingid']\n\n response = requests.delete(\n \"http://localhost:8080/api/bookings/\"+str(bookingid))\n acc = json.loads(response.text)\n return redirect(url_for('site.bookings'))",
"def cancel(self):\n self.__canceled = True",
"def cancel(self):\n pass",
"def test_cancelBooking(self):\n user_id = \"12\"\n car_id = \"6\"\n begin_date = \"2020-05-21\" \n begin_time = \"12:00:00\"\n\n begin_datetime = \"{} {}\".format(begin_date, begin_time) \n\n booking = db.session.query(Booking).filter( Booking.user_id == user_id,\n Booking.car_id == car_id,\n Booking.begin_time == begin_datetime).first()\n \n # Delete row from the database\n db.session.delete(booking)\n\n # Update car's availability \n car = Car.query.get(car_id)\n car.booked = False\n\n # Commit changes\n db.session.commit()\n self.assertFalse(self.bookingExists(user_id, car_id))",
"def hook_cancel_assistance(self, data):\n request_id = data[\"request_id\"]\n assignee_chat_id = data[\"volunteer\"]\n log.info(\"CANCEL req:%s\", request_id)\n self.send_message(assignee_chat_id, c.MSG_REQUEST_CANCELED)\n\n self.updater.dispatcher.user_data[assignee_chat_id].update(\n {\"current_request\": None, \"reviewed_request\": None, \"state\": c.State.AVAILABLE}\n )\n del self.updater.dispatcher.bot_data[request_id]\n self.updater.dispatcher.update_persistence()",
"def action_cancel(self):\n self.state = 'canceled'",
"def do_cancel(self):\r\n self.write({'cancelled': True})",
"def cancel(self):\n self.is_active = False\n self.save()",
"def cancel(self):\n self.is_active = False\n self.save()",
"def cancel_ride(self, cancel_reason: str, ride: dict) -> None:\n card: RideCard = self.ride_card_panel.surface_ride_card(ride)\n card.open_kebab_menu()\n card.kebab_menu.cancel_ride_button.click()\n\n self.cancellation_modal.cancel_ride(cancel_reason)",
"def cancel_proposal(self, id: bytes, proposer: 'Address', current_block_height: int) -> None:\n if not self._check_registered_proposal(id):\n revert(\"No registered proposal\")\n\n proposal_info = ProposalInfo.from_bytes(self._proposal_list[id])\n\n if proposal_info.end_block_height < current_block_height:\n revert(\"This proposal has already expired\")\n\n if proposer != proposal_info.proposer:\n revert(\"No permission - only for proposer\")\n\n if proposal_info.status != NetworkProposalStatus.VOTING:\n revert(\"Can not be canceled - only voting proposal\")\n\n proposal_info.status = NetworkProposalStatus.CANCELED\n self._proposal_list[id] = proposal_info.to_bytes()",
"def reservation_delete(token_user, res_id):\n res = Reservation.query.get(res_id)\n if res is None:\n abort(404, 'reservation not found')\n\n if not token_user.has_permission('reservation.delete.elevated'):\n is_my_reservation = any(map(lambda m: m.id == token_user.id,\n res.team.members))\n if not (is_my_reservation and\n token_user.has_permission('reservation.delete')):\n abort(403, 'insufficient permissions to delete reservation')\n\n get_db().delete(res)\n get_db().commit()\n\n return '', 204",
"def _cancel(self):\n client = SBusClient(self.storlet_pipe_path)\n try:\n resp = client.cancel(self.task_id)\n if not resp.status:\n raise StorletRuntimeException('Failed to cancel task')\n except SBusClientException:\n raise StorletRuntimeException('Failed to cancel task')",
"def cancel(self, membership, callback=None):",
"def on_cancel(self):\n self.state = CANCELED\n self._reject()",
"def cancel(self, cr, uid, ids, notes='', context=None):\n notes = \"\"\n u = self.browse(cr, uid, ids)[0].user_id.name\n notes = notes +'\\n'+'vehicle Cancelled at : '+time.strftime('%Y-%m-%d') + ' by '+ u \n self.write(cr, uid, ids, {'state':'cancel','notes':notes})\n return True",
"def cancel(self):\n self.stop()\n self.make_callback('canceled')",
"def canceled(self):\n self.reject()",
"def cancel(self):\n self.cancelled = True",
"def cancel(self):\n self.cancelled = True",
"def landlord_button_cancel_tenancy(self):\n for record in self:\n self.write(\n {'state': 'cancelled', 'tenancy_cancelled': True})\n rent_ids = self.env['tenancy.rent.schedule'].search(\n [('tenancy_id', '=', record.id),\n ('paid', '=', False),\n ('move_check', '=', False)])\n for value in rent_ids:\n value.write({'is_readonly': True})\n return True"
] |
[
"0.7251533",
"0.6900359",
"0.6353264",
"0.63135654",
"0.6305536",
"0.6305536",
"0.6305536",
"0.6251353",
"0.6242757",
"0.6192752",
"0.61905634",
"0.6167782",
"0.6162976",
"0.61334383",
"0.6130032",
"0.6127152",
"0.61173344",
"0.61173344",
"0.6115619",
"0.6088514",
"0.60856",
"0.60571074",
"0.6044834",
"0.6020202",
"0.6018942",
"0.6014244",
"0.60001314",
"0.5993796",
"0.5993796",
"0.59929955"
] |
0.80371726
|
0
|
Add a new reservation
|
async def add_reservation_endpoint(request):
    hotel_id = request.args["hotel_id"][0]
    room_type = request.args["room_type"][0]
    arrival_date = request.args["arrival_date"][0]
    departure_date = request.args["departure_date"][0]
    status = request.args["status"][0]
    reservation_id = model.add_reservation(hotel_id, room_type, arrival_date, departure_date, status)
    if reservation_id == model.OPERATION_ERROR_RETURN_CODE:
        return json({"success": False})
    return json({"success": True, "reservation_id": reservation_id})
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def reservation_add(token_user):\n if not json_param_exists('team_id') or \\\n not json_param_exists('room_id') or \\\n not json_param_exists('start') or \\\n not json_param_exists('end'):\n abort(400, 'one or more required parameter is missing')\n\n team_id = request.json['team_id']\n team = Team.query.get(team_id)\n if team is None:\n abort(400, 'invalid team id')\n\n if not (token_user.has_permission('reservation.create') and team.has_member(token_user)):\n abort(403)\n\n room_id = request.json['room_id']\n room = Room.query.get(room_id)\n if room is None:\n abort(400, 'invalid room id')\n\n start = parse_datetime(request.json['start'])\n end = parse_datetime(request.json['end'])\n if start is None or end is None:\n abort(400, 'cannot parse start or end date')\n\n if start >= end:\n abort(400, \"start time must be before end time\")\n\n res = Reservation(team=team, room=room, created_by=token_user,\n start=start, end=end)\n\n attempt_override = False\n if json_param_exists(\"override\") and isinstance(request.json[\"override\"], bool):\n attempt_override = request.json[\"override\"]\n\n conflict_status, conflicting_reservations = res.validate_conflicts()\n if conflict_status == Reservation.NO_CONFLICT:\n pass\n elif conflict_status == Reservation.CONFLICT_OVERRIDABLE:\n if attempt_override:\n # Delete conflicting reservations\n for conflict in conflicting_reservations:\n get_db().delete(conflict)\n else:\n return json.dumps({\"overridable\": True}), 409\n elif conflict_status == Reservation.CONFLICT_FAILURE:\n return json.dumps({\"overridable\": False}), 409\n\n get_db().add(res)\n get_db().commit()\n\n return '', 201",
"def create_new_reservation():\n if not request.json:\n return jsonify({'error': 'no body supplied'}), 400\n\n # look up by date to see if any availability\n res_date = request.json.get('date', None)\n if not res_date:\n error = 'no reservation date supplied'\n flash(error, 'error')\n return jsonify({'error': error}), 400\n\n # check if res time present, if found, convert to DT object\n res_time = request.json.get('time', None)\n if not res_time:\n error = 'no reservation time supplied'\n flash(error, 'error')\n return jsonify({'error': error}), 400\n res_time = time_str_to_obj(res_time)\n\n open_inventory = session.query(Inventory).filter_by(date=res_date).all()\n if not open_inventory:\n error = 'no open inventory for date {}'.format(res_date)\n flash(error, 'error')\n return jsonify({'error': error})\n\n error = 'reservation invalid'\n for inv in open_inventory:\n for window in inv.windows:\n if window.current_res_count < window.max_res_count:\n # check if res date falls in current window\n window_start = time_str_to_obj(window.start_time)\n window_end = time_str_to_obj(window.end_time)\n\n # if requested res time is valid, update res count and save res\n if window_start <= res_time < window_end:\n window.current_res_count = window.current_res_count + 1\n session.add(window)\n\n res = Reservation(**request.json)\n session.add(res)\n resp = session.commit()\n if not resp:\n # send message to flask for creation by name\n flash('reservation for {} created'.format(request.json.get('name')), 'success')\n return jsonify({'message': 'reservation for {} created'.format(request.json.get('name'))})\n else:\n error = 'requested reservation time is not available in current inventory'\n else:\n error = 'current inventory window cannot accept additional reservations, please select different time'\n flash(error, 'error')\n return jsonify({'error': error}), 400",
"def reservation(self, reservation):\n\n self._reservation = reservation",
"def add_new_arrival(self):\n pass",
"def create_reservation(self, gs_id, vehicle_id, user_id):\n\n # create the reservation\n reservation = Reservation(self.settings, gs_id, vehicle_id, user_id)\n status, model = reservation.create()\n\n # return status\n if status:\n json_res = model.to_json()\n return True, json_res\n else:\n return False, None",
"def validate_and_save(self, reservation, form):\n if not reservation.validate():\n context_data = self.get_context_data(reservation=reservation)\n context_data[\"error\"] = self.get_error_message(form, reservation)\n return render(self.request, self.template_name, context_data)\n\n reservation.save()\n return redirect(calendar_url_reservation(reservation))",
"def add_booking(user_id, rest_id, number_of_people, booking_datetime, table_id, entrance_datetime=None):\r\n try:\r\n booking = Booking()\r\n booking.restaurant_id = rest_id\r\n booking.user_id = user_id\r\n booking.booking_datetime = booking_datetime\r\n booking.entrance_datetime = entrance_datetime\r\n booking.number_of_people = number_of_people\r\n booking.table_id = table_id\r\n booking.datetime = datetime.datetime.now()\r\n db.session.add(booking)\r\n db.session.commit()\r\n return booking.id\r\n except:\r\n db.session.rollback()\r\n return None",
"def save_car_reservation(car_id, username, date_from, date_to):\n car = get_car_identified_by_id(car_id)\n price = calc_total_price(car.price, date_from, date_to)\n session = start_session()\n new_car_reservation = CarReservation(car_id, username, date_from, date_to, price)\n session.add(new_car_reservation)\n session.commit()\n queryset = session.query(CarReservation).filter(and_(CarReservation.id_car.__eq__(car_id),\n CarReservation.id_user.__eq__(username),\n CarReservation.date_from.__eq__(date_from),\n CarReservation.date_to.__eq__(date_to),\n CarReservation.price.__eq__(price)))\n reservation = queryset2list(queryset)[0]\n session.close()\n return reservation.id_reservation",
"def create_reservations(payload, user_id):\n error = False\n # get posted data from json request\n body = request.get_json()\n keys = body.keys()\n # if request does not have json body, abort 400\n if body is None:\n abort(400)\n # if json does not have key 'auth0_id', abort 400\n if 'auth0_id' not in keys:\n abort(400)\n # if json does not have key 'reservation', abort 400\n if 'reservations' not in keys:\n abort(400)\n # if auth0_id in body does not match auth0_id in payload, abort 401\n if body['auth0_id'] != payload['sub']:\n abort(401)\n\n # query who is accessing\n access_user = User.query.filter_by(auth0_id=payload['sub']).first()\n # check if user_id in URL matches the access user id\n if user_id != access_user.id:\n raise AuthError({\n 'code': 'Invalid_claims',\n 'description': 'Unauthorized access by user'\n }, 401)\n\n # query clothes and store them in variable \"clothes\"\n if not isinstance(body['reservations'], list):\n abort(400)\n for value in body['reservations']:\n if not isinstance(value, int):\n abort(400)\n # check if all clothes indeed exist\n clothes = []\n for clothes_id in body['reservations']:\n # query clothes\n selection = Clothes.query.get(clothes_id)\n if selection is None:\n abort(404)\n # if that clothes has been already reserved, abort 422\n if selection.status == \"reserved\":\n abort(422)\n clothes.append(selection)\n\n # query user\n user = User.query.get(user_id)\n formatted_user = user.format()\n\n # make reservations\n try:\n reservations = []\n formatted_clothes = []\n for item in clothes:\n new_reservation = Reserve()\n new_reservation.user = user\n new_reservation.clothes = item\n item.status = \"reserved\"\n reservations.append(new_reservation)\n # commit these reservations\n for reservation in reservations:\n reservation.insert()\n formatted_clothes.append(reservation.clothes.format())\n except Exception:\n # rollback all sessions\n for reservation in reservations:\n reservation.rollback()\n error = True\n print(sys.exc_info())\n finally:\n # close all sessions\n for reservation in reservations:\n reservation.close_session()\n\n if error:\n abort(422)\n else:\n return jsonify({\n 'success': True,\n 'clothes': formatted_clothes,\n 'user': formatted_user\n })",
"def save(self, *args, **kwargs):\n if not self.pk:\n self.start_time_rent = datetime.date.today()\n self.end_time_rent = self.start_time_rent + datetime.timedelta(days=7)\n self.reservation.isrented = True\n self.reservation.save()\n return super(Rental, self).save(*args, **kwargs)",
"def add_booking():\n try:\n \n carid = request.form[\"carid\"]\n userid = request.form[\"userid\"]\n fromdate = request.form[\"fromdate\"].strip()\n todate = request.form[\"todate\"].strip()\n\n print(fromdate, \"|\", todate)\n\n car = Car.query.get(carid)\n car.isavailable = False\n\n user = User.query.get(userid)\n user_email = user.email\n\n fromdate_obj = datetime.datetime.strptime(fromdate, '%Y-%m-%d')\n todate_obj = datetime.datetime.strptime(todate, '%Y-%m-%d')\n \n summary = \"Car Booking. Car id: \" + carid\n\n cal = CalendarUtil()\n resp = cal.addToCalendar(user_email, fromdate_obj, todate_obj, summary)\n cal_event_id = resp['id']\n booking = Booking(carid=carid, userid=userid, fromdate=fromdate, todate=todate, caleventid= cal_event_id, isactive=True)\n\n test = db.session.add(booking)\n db.session.commit()\n return bookingSchema.jsonify(booking)\n except Exception as ex:\n print(\"Failed to add event to calender. Exception: \", str(ex))\n return jsonify(None)",
"def add_ip_reservation(self, body, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async'):\n return self.add_ip_reservation_with_http_info(body, **kwargs)\n else:\n (data) = self.add_ip_reservation_with_http_info(body, **kwargs)\n return data",
"def reservation_update(token_user, res_id):\n if not json_param_exists('room_id') or \\\n not json_param_exists('start') or \\\n not json_param_exists('end'):\n abort(400, 'one or more required parameter is missing')\n\n room_id = request.json['room_id']\n room = Room.query.get(room_id)\n if room is None:\n abort(400, 'invalid room id')\n\n start = parse_datetime(request.json['start'])\n end = parse_datetime(request.json['end'])\n if start is None or end is None:\n abort(400, 'cannot parse start or end date')\n\n res = Reservation.query.get(res_id)\n if res is None:\n abort(400, 'invalid reservation id')\n\n if not token_user.has_permission('reservation.update.elevated'):\n is_my_reservation = any(map(lambda m: m.id == token_user.id,\n res.team.members))\n if not (is_my_reservation and\n token_user.has_permission('reservation.update')):\n abort(403, 'insufficient permissions to update reservation')\n\n res.room = room\n res.start = start\n res.end = end\n\n attempt_override = False\n if json_param_exists(\"override\") and isinstance(request.json[\"override\"], bool):\n attempt_override = request.json[\"override\"]\n\n conflict_status, conflicting_reservations = res.validate_conflicts()\n if conflict_status == Reservation.NO_CONFLICT:\n pass\n elif conflict_status == Reservation.CONFLICT_OVERRIDABLE:\n if attempt_override:\n # Delete conflicting reservations\n for conflict in conflicting_reservations:\n get_db().delete(conflict)\n else:\n return json.dumps({\"overridable\": True}), 409\n elif conflict_status == Reservation.CONFLICT_FAILURE:\n return json.dumps({\"overridable\": False}), 409\n\n get_db().commit()\n\n return '', 204",
"def add_reservation(self, src, dst,duration, bandwidth):\n \n # locks the self.current_reservations data structure. This is done\n # because there is a thread that could access it concurrently.\n with self.update_lock:\n\n # PART 1, TASK 3.4 check if there is an existing reservation for (src,dst). \n # you can use the self.current_reservations dictionary to check it.\n # If the reservation exists get the path and bw and update the links capacity \n # data structure using `self.add_link_capacity(path, bw)`\n \n # PART 1, TASK 3.1. Once get_available_path is implemented call it to get a path.\n path = self.get_available_path(src, dst, bandwidth)\n\n # PART 1, TASK 3.2 If there is an available path \n if path: \n pass\n # PART 1, TASK 3.2 Get mpls stack of labels\n\n # PART 1, TASK 3.3 get:\n # 1) ingress switch name\n # 2) action name using `mpls_ingress_x_hop` set x as number of labels\n # 3) src and dst ips (your match)\n # 4) make sure all your labels are strings and use them as action parameters\n\n # PART 1, TASK 3.4\n\n # check if its a new or an existing reservation (to update)\n\n # add entry or modify\n # PART 2 TASK 1.4 Configure the associated meter properly.\n\n # update controllers data structures: self.current_reservation & self.links_capacity\n \n\n # PART 1, TASK 3.2 otherwise we print no path available\n else:\n # PART 1, task 4.3 if we dont find a path but the reservation existed\n # you have to erase it while making sure you update links_capacity accordingly \n print(\"\\033[91mRESERVATION FAILURE: no bandwidth available!\\033[0m\")",
"def save(self, *args, **kwargs):\n if not self.pk:\n self.start_time_booking = datetime.date.today()\n self.end_time_booking = self.start_time_booking + datetime.timedelta(days=5)\n self.cars.quantity -= 1\n self.cars.save()\n return super(Reservation, self).save(*args, **kwargs)",
"def form_valid(self, form, **kwargs):\n reservation = Reservation(start_time=form.cleaned_data[\"start_time\"],\n end_time=form.cleaned_data[\"end_time\"], user=self.request.user,\n machine=form.cleaned_data[\"machine\"], comment=form.cleaned_data[\"comment\"])\n\n if form.cleaned_data[\"event\"]:\n reservation.event = form.cleaned_data[\"event\"]\n\n if form.cleaned_data[\"special\"]:\n reservation.special = True\n reservation.special_text = form.cleaned_data[\"special_text\"]\n\n return self.validate_and_save(reservation, form)",
"def schedule_reservation(reservation_date,reservation_time,party_size,restaurant_name,first_name,restaurant_address):\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('calendar', 'v3', credentials=creds)\n\n # Call the Calendar API\n now = datetime.datetime.utcnow()\n\n reservation_day=reservation_date.split('/')[0]\n reservation_month =reservation_date.split('/')[1]\n reservation_year =reservation_date.split('/')[2]\n reservation_date = reservation_year+'-'+reservation_month+'-'+reservation_day\n start_time_hr= reservation_time[:2]\n end_time_hr= int(reservation_time[:2])+4\n start_time_min= reservation_time[2:]\n end_time_min=start_time_min\n \n \n event = {\n 'summary': 'Reservation at '+restaurant_name,\n 'location': restaurant_address,\n 'description': 'Reservation for '+party_size+' under '+first_name+' made on '+str(now),\n 'start': {\n 'dateTime': reservation_date+'T'+start_time_hr+':'+start_time_min+':00+08:00',\n 'timeZone': 'Asia/Singapore',\n },\n 'end': {\n 'dateTime': reservation_date+'T'+str(end_time_hr)+':'+end_time_min+':00+08:00',\n 'timeZone': 'Asia/Singapore',\n },\n 'reminders': {\n 'useDefault': False,\n 'overrides': [\n {'method': 'email', 'minutes': 24 * 60},\n {'method': 'popup', 'minutes': 10},\n ],\n },\n }\n\n event = service.events().insert(calendarId='primary', body=event).execute()\n print ('Event created: %s', (event.get('htmlLink')))",
"def reservs(request):\n a = request.GET\n print(a)\n if request.method == 'POST':\n # create a form\n form = NewReservationsOfficesForm(data=request.POST)\n if form.is_valid():\n form.save()\n return redirect('coworkings:index')\n else:\n form = NewReservationsOfficesForm()\n\n context = {\"form\": form}\n return render(request, 'coworkings/reservs.html', context)",
"def addToReservation():\n\n def fits(x, y):\n \"\"\"\n Check if a job shape's resource requirements will fit within a given node allocation\n \"\"\"\n return y.memory <= x.memory and y.cores <= x.cores and y.disk <= x.disk\n\n def subtract(x, y):\n \"\"\"\n Adjust available resources of a node allocation as a job is scheduled within it.\n \"\"\"\n return Shape(x.wallTime, x.memory - y.memory, x.cores - y.cores, x.disk - y.disk)\n\n def split(x, y, t):\n \"\"\"\n Partition a node allocation into two\n \"\"\"\n return (Shape(t, x.memory - y.memory, x.cores - y.cores, x.disk - y.disk),\n NodeReservation(Shape(x.wallTime - t, x.memory, x.cores, x.disk)))\n\n i = 0 # Index of node reservation\n while True:\n # Case a new node reservation is required\n if i == len(nodeReservations):\n x = NodeReservation(subtract(nodeShape, jS))\n nodeReservations.append(x)\n t = nodeShape.wallTime\n while t < jS.wallTime:\n y = NodeReservation(x.shape)\n t += nodeShape.wallTime\n x.nReservation = y\n x = y\n return\n\n # Attempt to add the job to node reservation i\n x = nodeReservations[i]\n y = x\n t = 0\n \n while True:\n if fits(y.shape, jS):\n t += y.shape.wallTime\n \n # If the jS fits in the node allocation from x to y\n if t >= jS.wallTime:\n t = 0\n while x != y:\n x.shape = subtract(x.shape, jS)\n t += x.shape.wallTime\n x = x.nReservation\n assert x == y\n assert jS.wallTime - t <= x.shape.wallTime\n if jS.wallTime - t < x.shape.wallTime:\n x.shape, nS = split(x.shape, jS, jS.wallTime - t)\n nS.nReservation = x.nReservation\n x.nReservation = nS\n else:\n assert jS.wallTime - t == x.shape.wallTime\n x.shape = subtract(x.shape, jS)\n return \n \n # If the job would fit, but is longer than the total node allocation\n # extend the node allocation\n elif y.nReservation == None and x == nodeReservations[i]:\n # Extend the node reservation to accommodate jS\n y.nReservation = NodeReservation(nodeShape)\n \n else: # Does not fit, reset\n x = y.nReservation\n t = 0\n \n y = y.nReservation\n if y is None:\n # Reached the end of the reservation without success so stop trying to\n # add to reservation i\n break\n i += 1",
"def add_new_item(self, request, *a, **kw):\n item_def = request.data\n cpdoc = self.get_object()\n item_def['calendar_plan'] = cpdoc.id\n\n item_ser = self.get_serializer(data=item_def)\n item_ser.is_valid(raise_exception=True)\n item_obj = item_ser.save()\n\n headers = self.get_success_headers(item_ser.data)\n return response.Response(item_ser.data, headers=headers)",
"def post(self, flight_id):\n data = request.get_json()\n seat = 1\n if data:\n seat = data.get('seat')\n current_user = get_jwt_identity()\n try:\n flight = get_flight(flight_id)\n if not flight:\n return generate_response('Selected flight not available', 400)\n\n if seat == 1 and flight.booked_economy < flight.airplane.economy_seats:\n data = dict(booked_economy=flight.booked_economy+1)\n save_booking(current_user, flight_id)\n flight.update(flight, **data)\n return generate_response('Economy seat flight reservation successfull', 201)\n\n if seat == 2 and flight.booked_business < flight.airplane.business_seats:\n data = dict(booked_business=flight.booked_business+1)\n save_booking(current_user, flight_id)\n flight.update(flight, **data)\n return generate_response('Business seat flight reservation successfull', 201)\n\n except Exception as e:\n db.session.rollback()\n return jsonify({'error': str(e)}), 401",
"def insert_reservation(house, id, check_in_date, check_in_time, check_out_date, guest_name,\n guest_cell, guest_telegram, num_guest, comment, confirm):\n sql = \"\"\"INSERT INTO %s VALUES(%s, '%s', '%s', '%s', '%s', '%s', '%s', %s, '%s', %s) RETURNING reservation_id;\"\"\"\n conn = None\n reservation_id = None\n try:\n # read database configuration\n params = config()\n # connect to the PostgreSQL database\n conn = psycopg2.connect(**params)\n # create a new cursor\n cur = conn.cursor()\n # execute the INSERT statement\n print(sql % (house, id, check_in_date, check_in_time, check_out_date, guest_name,\n guest_cell, guest_telegram, num_guest, comment, confirm))\n cur.execute(sql, (house, id, check_in_date, check_in_time, check_out_date, guest_name,\n guest_cell, guest_telegram, num_guest, comment, confirm))\n # get the generated id back\n vendor_id = cur.fetchone()[0]\n # commit the changes to the database\n conn.commit()\n # close communication with the database\n cur.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()\n\n return reservation_id",
"def addBooking(self, booking):\n self.bookings.addBooking(booking.getID())",
"def reservation(self):\n return self.request.get('reservation', None)",
"def SaveToReservationSQL(self, order, recipientid):\n\n # get counter, increase it, save counter, and use for reservation\n # managerid, recipientid,\n # insert reservation \\'{reservation_guid}\\\n # insert people\n # insert subclaim\n\n cursor = self.cursor\n\n reservation_guid = order[\"id\"]\n km_number = order[\"crmid\"]\n cursor.execute('select TOP 1 id from reservation where trash=0 and (guid=? or ndog=?) order by id desc',\n (reservation_guid, km_number))\n\n row = cursor.fetchone()\n if (not row):\n reservation_new = 1\n reservationid = None\n else:\n reservation_new = 0\n reservationid = row[0]\n\n # check subclaims\n # reservation_to_delete=row[0]\n # query='select id from subclaim where claimid=?'\n # cursor.execute(query,reservation_to_delete)\n # rows=cursor.fetchall()\n # if rows :\n # query='select number from reservation where id=?'\n # cursor.execute(query,reservation_to_delete)\n # row = cursor.fetchone()\n # self.number = row[0]\n\n # TODO - update existing reservation\n # return 0\n\n # query='update reservation set trash=1 where id=?'\n # cursor.execute(query,reservation_to_delete)\n\n # create reservation if it is missing\n\n if reservation_new == 0:\n\n cursor.execute('select number from reservation where id=? and trash=0', reservationid)\n row = cursor.fetchone()\n number = row[0]\n self.number = number\n\n else:\n number = km_number\n self.number = number\n\n print('Dogovor number ', number, 'KM', km_number, 'reservationid ', reservationid)\n\n manager_guid = order[\"manager\"][\"id\"]\n query = f'select id from recipient where guid=\\'{manager_guid}\\''\n cursor.execute(query)\n row = cursor.fetchone()\n humanid = row[0]\n\n guid = order[\"id\"]\n currency = order[\"cruises\"][0][\"currency\"]\n print(currency)\n\n date_created = datetime.fromisoformat(order[\"created\"][:order[\"created\"].find('.')])\n\n query = '''\ninsert into dbo.[reservation]\n([number], [cdate], [recipientid], [humanid], [officeid], [legalid], [statusid],\n [pdate], [currencyid],[ndog],[guid])\nvalues (?,?,?,?,?,?,?,?,?,?,?)\n'''\n\n # TODO officeid by manager, legalid by owner, statusid?\n ## if reservation is not exist create new, else update\n values = (\n km_number, date_created, recipientid, humanid, 29921, 136, 2, date_created, currencymap[currency],\n order[\"crmid\"],\n guid)\n print(values)\n if (reservation_new == 1) and (km_number):\n cursor.execute(query, values)\n cursor.execute(\"select IDENT_CURRENT('reservation')\")\n row = cursor.fetchone()\n id = row[0]\n cursor.execute('exec ChangesLog_AddNew ?,?,?,?,?,?,?,?,?,?,?,?,?', (\n 'robot python', 1, 'reservation', id, km_number, 'reservation', id, str(id), None, None, '', None, ''))\n\n\n elif (reservation_new == 0) and (km_number):\n update_query = \"\"\" update dbo.[reservation] \n set cdate = ?, recipientid=?, humanid = ?, officeid=?, legalid=?, statusid=?, pdate=?, currencyid=?, guid =?, ndog = ? where id=?\"\"\"\n cursor.execute(update_query, (\n date_created, recipientid, humanid, 29921, 136, 2, date_created, currencymap[currency], guid, km_number,\n reservationid))\n id = reservationid\n else:\n id = 0\n return id, reservation_new",
"def __init__(__self__,\n resource_name: str,\n args: Optional[ReservationArgs] = None,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def reservation(self):\n return self._reservation",
"def reservation_mark_entrance(user: User, reservation: Reservation):\n owns_restaurant = reservation.restaurant.operator == user\n if owns_restaurant and reservation.status is ReservationState.ACCEPTED and reservation.reservation_time <= datetime.datetime.now():\n #Might want to add user notification\n reservation.entrance_time = datetime.datetime.now()\n reservation.status = ReservationState.SEATED\n db.session.commit()\n return True\n\n return False",
"def form_valid(self, form, **kwargs):\n reservation = kwargs[\"reservation\"]\n # The user is not allowed to change the machine for a reservation\n if reservation.machine != form.cleaned_data[\"machine\"]:\n return redirect(\"my_reservations\")\n\n reservation.comment = form.cleaned_data[\"comment\"]\n\n reservation.start_time = form.cleaned_data[\"start_time\"]\n reservation.end_time = form.cleaned_data[\"end_time\"]\n if reservation.event:\n reservation.event = form.cleaned_data[\"event\"]\n\n if reservation.special:\n reservation.special_text = form.cleaned_data[\"special_text\"]\n\n return self.validate_and_save(reservation, form)",
"def room_add():\n if not json_param_exists('number'):\n abort(400, 'invalid room number')\n\n if not isinstance(request.json['number'], str):\n abort(400, 'room number must be string')\n\n num = request.json['number']\n room = Room(number=num)\n\n try:\n get_db().add(room)\n get_db().commit()\n except IntegrityError:\n abort(409, 'room number is already in use')\n return json.dumps(room.as_dict(include_features=False)), 201"
] |
[
"0.75879747",
"0.7112902",
"0.6949954",
"0.6692203",
"0.650362",
"0.63385224",
"0.6337414",
"0.62899226",
"0.62474537",
"0.61857814",
"0.6139244",
"0.61172265",
"0.60497403",
"0.60310566",
"0.6028615",
"0.6002983",
"0.596607",
"0.58925354",
"0.5867518",
"0.5861634",
"0.58234113",
"0.58197767",
"0.58127135",
"0.5779862",
"0.5776109",
"0.576195",
"0.57400244",
"0.5672108",
"0.56688523",
"0.5621449"
] |
0.75634503
|
1
|
Get an existing reservation
|
async def get_reservation_endpoint(request):
    reservation_id = request.args["reservation_id"][0]
    reservation_dict = model.get_reservation(reservation_id)
    return json(reservation_dict)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def reservation(self):\n return self.request.get('reservation', None)",
"def reservation(self):\n return self._reservation",
"def reservation_read(token_user, res_id):\n res = Reservation.query.get(res_id)\n if res is None:\n abort(404, 'reservation not found')\n\n return json.dumps(res.as_dict(for_user=token_user))",
"def select_reservation(self, ctx: dataclasses.dataclass) -> ResultE[dataclasses.dataclass]:\n pk = cf.get_int_or_none(ctx.pk) or 0\n if pk <= 0:\n return self._error('Missed Reservation ID', ctx, ReservationErrors.missed_reservation)\n try:\n data = self._reservations_repo.get(pk)\n except Exception as err:\n return self._error(\n f\"Error select Reservation ID={pk} in House ID={ctx.house.id}\", ctx, ReservationErrors.error, exc=err\n )\n if data == Nothing:\n return self._error(\n f\"Unknown Reservation ID={pk} in House ID={ctx.house.id}\", ctx, ReservationErrors.missed_reservation\n )\n if hasattr(ctx, 'source'):\n ctx.source = data.unwrap()\n else:\n ctx.reservation = data.unwrap()\n return Success(ctx)",
"def show_reservation(self, reservation_id):\n\n # create an instance of the model\n reserv_model = Reservation(self.settings)\n\n # query the model\n r = reserv_model.find_reservation(reservation_id)\n\n # return the result in a json-ifiable form\n json_reservation = r.to_json()\n\n # return\n print json_reservation\n return json_reservation",
"def get_res_by_id(res_id):\n # look up ID, if non-exist return error message\n res = session.query(Reservation).filter_by(id=res_id).first()\n if not res:\n return jsonify({'error': 'no reservation with id {} found'.format(res_id)}), 400\n return jsonify({'reservation': res.serialize()})",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Reservation':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = ReservationArgs.__new__(ReservationArgs)\n\n __props__.__dict__[\"concurrency\"] = None\n __props__.__dict__[\"creation_time\"] = None\n __props__.__dict__[\"ignore_idle_slots\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"multi_region_auxiliary\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"project\"] = None\n __props__.__dict__[\"reservation_id\"] = None\n __props__.__dict__[\"slot_capacity\"] = None\n __props__.__dict__[\"update_time\"] = None\n return Reservation(resource_name, opts=opts, __props__=__props__)",
"def get_pass_reservations(pass_id):\n cursor.execute(\"\"\"select * from reservations where paying_passenger_id= %s\"\"\", [pass_id]) #query\n reservations = cursor.fetchall() #fetch all reservations related to that passenger\n\n return reservations",
"def reservation_add(token_user):\n if not json_param_exists('team_id') or \\\n not json_param_exists('room_id') or \\\n not json_param_exists('start') or \\\n not json_param_exists('end'):\n abort(400, 'one or more required parameter is missing')\n\n team_id = request.json['team_id']\n team = Team.query.get(team_id)\n if team is None:\n abort(400, 'invalid team id')\n\n if not (token_user.has_permission('reservation.create') and team.has_member(token_user)):\n abort(403)\n\n room_id = request.json['room_id']\n room = Room.query.get(room_id)\n if room is None:\n abort(400, 'invalid room id')\n\n start = parse_datetime(request.json['start'])\n end = parse_datetime(request.json['end'])\n if start is None or end is None:\n abort(400, 'cannot parse start or end date')\n\n if start >= end:\n abort(400, \"start time must be before end time\")\n\n res = Reservation(team=team, room=room, created_by=token_user,\n start=start, end=end)\n\n attempt_override = False\n if json_param_exists(\"override\") and isinstance(request.json[\"override\"], bool):\n attempt_override = request.json[\"override\"]\n\n conflict_status, conflicting_reservations = res.validate_conflicts()\n if conflict_status == Reservation.NO_CONFLICT:\n pass\n elif conflict_status == Reservation.CONFLICT_OVERRIDABLE:\n if attempt_override:\n # Delete conflicting reservations\n for conflict in conflicting_reservations:\n get_db().delete(conflict)\n else:\n return json.dumps({\"overridable\": True}), 409\n elif conflict_status == Reservation.CONFLICT_FAILURE:\n return json.dumps({\"overridable\": False}), 409\n\n get_db().add(res)\n get_db().commit()\n\n return '', 201",
"def reservation_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"reservation_id\")",
"def reservation_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"reservation_id\")",
"def get_wharton_gsr_reservations():\n\n sessionid = get_wharton_sessionid()\n\n if not sessionid:\n return jsonify({'error': 'No Session ID provided.'})\n\n try:\n reservations = wharton.get_reservations(sessionid)\n save_wharton_sessionid()\n return jsonify({'reservations': reservations})\n except APIError as e:\n return jsonify({\"error\": str(e)}), 400",
"def create_new_reservation():\n if not request.json:\n return jsonify({'error': 'no body supplied'}), 400\n\n # look up by date to see if any availability\n res_date = request.json.get('date', None)\n if not res_date:\n error = 'no reservation date supplied'\n flash(error, 'error')\n return jsonify({'error': error}), 400\n\n # check if res time present, if found, convert to DT object\n res_time = request.json.get('time', None)\n if not res_time:\n error = 'no reservation time supplied'\n flash(error, 'error')\n return jsonify({'error': error}), 400\n res_time = time_str_to_obj(res_time)\n\n open_inventory = session.query(Inventory).filter_by(date=res_date).all()\n if not open_inventory:\n error = 'no open inventory for date {}'.format(res_date)\n flash(error, 'error')\n return jsonify({'error': error})\n\n error = 'reservation invalid'\n for inv in open_inventory:\n for window in inv.windows:\n if window.current_res_count < window.max_res_count:\n # check if res date falls in current window\n window_start = time_str_to_obj(window.start_time)\n window_end = time_str_to_obj(window.end_time)\n\n # if requested res time is valid, update res count and save res\n if window_start <= res_time < window_end:\n window.current_res_count = window.current_res_count + 1\n session.add(window)\n\n res = Reservation(**request.json)\n session.add(res)\n resp = session.commit()\n if not resp:\n # send message to flask for creation by name\n flash('reservation for {} created'.format(request.json.get('name')), 'success')\n return jsonify({'message': 'reservation for {} created'.format(request.json.get('name'))})\n else:\n error = 'requested reservation time is not available in current inventory'\n else:\n error = 'current inventory window cannot accept additional reservations, please select different time'\n flash(error, 'error')\n return jsonify({'error': error}), 400",
"def reservation_details():\n session_id = request.args.get('session-id', None)\n user_id = request.args.get('user-id', None)\n reservation_id = request.args.get('reservation-id', None)\n reservation = get_reservation_identified_by_id(reservation_id)\n car = get_car_identified_by_id(reservation.id_car)\n date_from = str(reservation.date_from)\n date_to = str(reservation.date_to)\n total_price = get_total_price(reservation_id)\n if check_authentication(session_id, user_id) and is_reservation_of_the_user(reservation_id, user_id):\n return render_template('car_reservation_details.html', user=user_id, session_id=session_id, car=car,\n reservation_id=reservation_id, date_from=date_from,\n date_to=date_to, total_price=total_price)\n else:\n return render_template('home.html', cars_list=get_cars_preview(), news_list=get_news_list(), authjs=False,\n preview_length=get_cars_preview().__len__(), del_session_cookie=True)",
"def get_reservations():\n start_date = request.args.get('start')\n end_date = request.args.get('end')\n\n if start_date is not None and end_date is not None:\n start = parse_datetime(request.json['start'])\n end = parse_datetime(request.json['end'])\n if start is None or end is None:\n abort(400, 'cannot parse start or end date')\n\n reservations = Reservation.query.filter(\n Reservation.end >= start, Reservation.start <= end)\n else:\n reservations = Reservation.query.filter(\n or_(Reservation.start >= datetime.datetime.now(),\n Reservation.end >= datetime.datetime.now()))\n\n reservations = map(lambda x: x.as_dict(), reservations)\n\n return json.dumps(reservations)",
"def get_reservations_endpoint():\n\n email = request.args.get('email')\n sessionid = request.args.get('sessionid')\n if not email and not sessionid:\n return jsonify({\"error\": \"A session id or email must be sent to server.\"}), 400\n\n libcal_search_span = request.args.get(\"libcal_search_span\")\n if libcal_search_span:\n try:\n libcal_search_span = int(libcal_search_span)\n except ValueError:\n return jsonify({\"error\": \"Search span must be an integer.\"}), 400\n else:\n libcal_search_span = 3\n\n try:\n reservations = get_reservations(email, sessionid, libcal_search_span)\n return jsonify({'reservations': reservations})\n except APIError as e:\n return jsonify({\"error\": str(e)}), 400",
"async def add_reservation_endpoint(request):\n hotel_id = request.args[\"hotel_id\"][0]\n room_type = request.args[\"room_type\"][0]\n arrival_date = request.args[\"arrival_date\"][0]\n departure_date = request.args[\"departure_date\"][0]\n status = request.args[\"status\"][0]\n reservation_id = model.add_reservation(hotel_id, room_type, arrival_date, departure_date, status)\n if reservation_id == model.OPERATION_ERROR_RETURN_CODE:\n return json({\"success\": False})\n return json({\"success\": True, \"reservation_id\": reservation_id})",
"def _quota_reservations_query(context, reservations):\n return model_query(\n context, models.Reservation,\n read_deleted=\"no\",\n ).filter(\n models.Reservation.uuid.in_(reservations),\n ).with_for_update()",
"def reservation_detail(request, reservation_number):\n if not request.user.is_superuser:\n messages.error(request, \"Sorry, you don't have access to this \\\n part of the site.\")\n return redirect(reverse('home'))\n\n amenities = Amenity.objects.all()\n reservation = get_object_or_404(Reservation,\n reservation_number=reservation_number)\n\n messages.info(request, f'This is the confirmation email sent to the guests \\\n after booking for reservation number {reservation_number}.')\n\n template = 'checkout/checkout_success.html'\n context = {\n 'reservation': reservation,\n 'admin': True,\n 'amenities': amenities,\n }\n return render(request, template, context)",
"def getReservationDict():\n table = 'reservations'\n connection = openConnection()\n curs = connection.cursor()\n sqlcmd = \"SELECT * FROM \" + str(table)\n d = {}\n \n curs.execute(sqlcmd)\n for row in curs.fetchall():\n flightData = airlineClasses.Flight(row[2],\n row[3],row[4],row[5],row[6],row[7],row[8],row[9],row[10])\n reservation = airlineClasses.Reservation(row[0],row[1],flightData)\n d[reservation.reservationId] = reservation\n\n curs.close()\n connection.close()\n \n return d",
"def reservation_update(token_user, res_id):\n if not json_param_exists('room_id') or \\\n not json_param_exists('start') or \\\n not json_param_exists('end'):\n abort(400, 'one or more required parameter is missing')\n\n room_id = request.json['room_id']\n room = Room.query.get(room_id)\n if room is None:\n abort(400, 'invalid room id')\n\n start = parse_datetime(request.json['start'])\n end = parse_datetime(request.json['end'])\n if start is None or end is None:\n abort(400, 'cannot parse start or end date')\n\n res = Reservation.query.get(res_id)\n if res is None:\n abort(400, 'invalid reservation id')\n\n if not token_user.has_permission('reservation.update.elevated'):\n is_my_reservation = any(map(lambda m: m.id == token_user.id,\n res.team.members))\n if not (is_my_reservation and\n token_user.has_permission('reservation.update')):\n abort(403, 'insufficient permissions to update reservation')\n\n res.room = room\n res.start = start\n res.end = end\n\n attempt_override = False\n if json_param_exists(\"override\") and isinstance(request.json[\"override\"], bool):\n attempt_override = request.json[\"override\"]\n\n conflict_status, conflicting_reservations = res.validate_conflicts()\n if conflict_status == Reservation.NO_CONFLICT:\n pass\n elif conflict_status == Reservation.CONFLICT_OVERRIDABLE:\n if attempt_override:\n # Delete conflicting reservations\n for conflict in conflicting_reservations:\n get_db().delete(conflict)\n else:\n return json.dumps({\"overridable\": True}), 409\n elif conflict_status == Reservation.CONFLICT_FAILURE:\n return json.dumps({\"overridable\": False}), 409\n\n get_db().commit()\n\n return '', 204",
"def _quota_reservations(session, context, reservations):\n\n # Get the listed reservations\n return model_query(context, models.Reservation,\n read_deleted=\"no\",\n session=session).\\\n filter(models.Reservation.uuid.in_(reservations)).\\\n with_lockmode('update').\\\n all()",
"def show_reservations(self, user_id = None):\n\n # create an instance of the model\n reserv_model = Reservation(self.settings)\n\n # query the model\n results = reserv_model.find_reservations(user_id)\n\n # return the result in a json-ifiable form\n json_results = []\n for reservation in results:\n json_results.append(reservation.to_json())\n\n # return\n print json_results\n return json_results",
"def reservations(self):\n session_id = plone_session.get_session_id(self.context)\n return db.reservations_by_session(session_id).all()",
"def reservation_data(self):\n reservations = []\n\n for reservation in self.reservations():\n resource = utils.get_resource_by_uuid(reservation.resource)\n\n if resource is None:\n log.warn('Invalid UUID %s' % str(reservation.resource))\n continue\n\n resource = resource.getObject()\n\n data = {}\n\n data['title'] = utils.get_resource_title(resource)\n\n timespans = []\n for start, end in reservation.timespans():\n timespans.append(u'◆ ' + utils.display_date(start, end))\n\n data['time'] = '<br />'.join(timespans)\n data['quota'] = utils.get_reservation_quota_statement(\n reservation.quota\n ) if reservation.quota > 1 else u''\n\n data['url'] = resource.absolute_url()\n data['remove-url'] = ''.join((\n resource.absolute_url(),\n '/your-reservations?remove=',\n reservation.token.hex\n ))\n reservations.append(data)\n\n return reservations",
"def reservation(self, reservation):\n\n self._reservation = reservation",
"def reservs(request):\n a = request.GET\n print(a)\n if request.method == 'POST':\n # create a form\n form = NewReservationsOfficesForm(data=request.POST)\n if form.is_valid():\n form.save()\n return redirect('coworkings:index')\n else:\n form = NewReservationsOfficesForm()\n\n context = {\"form\": form}\n return render(request, 'coworkings/reservs.html', context)",
"def create_reservation(self, gs_id, vehicle_id, user_id):\n\n # create the reservation\n reservation = Reservation(self.settings, gs_id, vehicle_id, user_id)\n status, model = reservation.create()\n\n # return status\n if status:\n json_res = model.to_json()\n return True, json_res\n else:\n return False, None",
"def check_reservation(self, gs_id, vehicle_id, user_id, res_type):\n\n # perform the query\n status = reservation_exists(self.settings, user_id, vehicle_id, gs_id, res_type) \n\n # return status\n if status:\n return True\n else:\n return False",
"def reserve_seat(self):\n try:\n # start a new transaction\n self.cnx.start_transaction()\n cur = self.cnx.cursor()\n\n # iterate through the rows of the result until\n # we find a seat that is open\n cur.execute(\"select seat, status from Flights\")\n found = None\n for row in cur.fetchall():\n if row[1] == 0:\n found = row[0]\n break\n\n # if we found an available seat\n if found is not None:\n # wait for user to confirm they want the seat\n print \"seat \", found, \" is open. <Enter> to continue.\"\n sys.stdin.readline()\n\n # update that the seat is taken\n cur.execute(\"update Flights set status = 1 where seat = %s\", (found,))\n self.cnx.commit()\n return found\n else:\n # if failed to reserve that seat then rollback and return None to indicate failure\n self.cnx.rollback()\n return None\n except mysql.connector.InternalError as e:\n print \"failed to reserve: \", e\n try:\n self.cnx.rollback()\n except mysql.connector.InternalError as e:\n # silence\n pass\n return None"
] |
[
"0.7379199",
"0.71298105",
"0.70966446",
"0.70289004",
"0.6932207",
"0.68829936",
"0.66138154",
"0.65487725",
"0.64873415",
"0.64082396",
"0.6286343",
"0.628403",
"0.62172484",
"0.6214408",
"0.61116004",
"0.6033761",
"0.60133725",
"0.5996568",
"0.59386927",
"0.5936628",
"0.5919651",
"0.5905735",
"0.58974254",
"0.58911747",
"0.58231586",
"0.58035326",
"0.5756488",
"0.57189953",
"0.5649887",
"0.563695"
] |
0.7394642
|
0
|
List the inventory of a hotel in a specific date range
|
async def list_inventory_endpoint(request):
    hotel_id = request.args["hotel_id"][0]
    start_date = request.args["start_date"][0]
    end_date = request.args["end_date"][0]
    inventory = model.list_inventory(hotel_id, start_date, end_date)
    if inventory == model.OPERATION_ERROR_RETURN_CODE:
        return json({"success": False})
    return json({"success": True, "inventory": inventory})
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def inventory(request, concierge, template=\"concierges/inventory_check.html\"):\n inventory = []\n for x in xrange(0, 2):\n day = date.today() + timedelta(days=x)\n dow = DayOfWeek.objects.get(isoweekday=day.isoweekday())\n day_info = {'day': day, 'times': []}\n schedules = dow.tourschedule_set.filter(active=True, tour_type__active=True,\n tour_type__default_site_skin__is_concierge_cta=True).order_by('tour_type__order')\n for sched in schedules:\n product = sched.tour_type.get_product(day, schedule=sched).product\n tour_info = {\n 'day': day,\n 'time': sched.pretty_time,\n 'tour_type': sched.tour_type,\n 'seats_available': Decimal(product.items_in_stock) - Decimal(product.total_sold)\n }\n day_info['times'].append(tour_info)\n\n inventory.append(day_info)\n\n ctx = RequestContext(request, {\n 'concierge': concierge,\n 'inventory': inventory\n })\n\n return render_to_response(template, context_instance=ctx)",
"def get_date_range():\n start_date = request.args.get(\"start\", default=None, type=str)\n start_date = datetime.datetime.fromisoformat(start_date)\n end_date = request.args.get(\"end\", default=None, type=str)\n end_date = datetime.datetime.fromisoformat(end_date)\n\n animals = []\n for key in rd.keys(\"*\"):\n animal = json.loads(rd.get(key))\n if (\n start_date\n <= datetime.datetime.fromisoformat(animal[\"created-on\"])\n <= end_date\n ):\n animals.append(animal)\n\n return jsonify(animals)",
"def get_slots_for_date(url: str, session: requests.Session) -> List[Dict]:\n response = session.get(\n url,\n headers={\n \"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\",\n \"Adrum\": \"isAjax:true\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n },\n )\n\n slots = list(\n filter(lambda item: item[\"status\"] != \"UnAvailable\", response.json()[\"slots\"])\n )\n\n return slots",
"def list_inventory(self):\n\n print('Your inventory contains:')\n #i = 1\n #inv_dict = {}\n for item in self.bag_of_holding:\n if 'casted' not in item.name:\n try:\n print(item.name)\n except:\n pass\n\n #inv_dict[str(i)] = item\n #i += 1\n #return inv_dict",
"def get_queryset(self): # NOQA\n rates = Rate.objects.filter(\n inventory__date__gte=self.kwargs.get('checkin'),\n inventory__date__lte=self.kwargs.get('checkout'),\n room__hotel__public_id=self.kwargs.get('hotel_id')\n ).values('room__public_id', 'price', \"inventory__date\")\n return rates",
"def _input_date(stock_list: 'a namedtuple') -> list:\n\n stock_info = stock_list\n\n format_date = '%Y-%m-%d'\n start_date = datetime.strptime(input('Start Date: ').strip(), format_date)\n end_date = datetime.strptime(input('End Date: ').strip(), format_date)\n\n result_stock_list = []\n for stock in stock_info:\n date = datetime.strptime(stock.date, format_date)\n if start_date <= date <= end_date:\n result_stock_list.append(stock)\n result_stock_list.sort()\n return result_stock_list",
"def get_reservations():\n start_date = request.args.get('start')\n end_date = request.args.get('end')\n\n if start_date is not None and end_date is not None:\n start = parse_datetime(request.json['start'])\n end = parse_datetime(request.json['end'])\n if start is None or end is None:\n abort(400, 'cannot parse start or end date')\n\n reservations = Reservation.query.filter(\n Reservation.end >= start, Reservation.start <= end)\n else:\n reservations = Reservation.query.filter(\n or_(Reservation.start >= datetime.datetime.now(),\n Reservation.end >= datetime.datetime.now()))\n\n reservations = map(lambda x: x.as_dict(), reservations)\n\n return json.dumps(reservations)",
"def test_get_dealer_historical_inventory(self):\n pass",
"def get_ride_report(startDate, endDate):\n\n results_list = []\n\n session = DB_SESSION()\n\n results = []\n\n results = session.query(Report).filter(Report.date_created>=startDate, Report.date_created<=endDate)\n\n for result in results:\n results_list.append(result.to_dict())\n print(result.to_dict())\n\n session.close()\n\n return results_list, 200",
"def vaccinations(self, from_date: str, to_date: str) -> VaccinationList:\n params = {'date_from': from_date, 'date_to': to_date}\n data = self.get(\"mdg_emvolio\", params=params)\n\n ls = [Vaccination(**area) for area in data]\n return VaccinationList(items=ls)",
"def get_items_sold_between(table, month_from, day_from, year_from, month_to, day_to, year_to):\n\n items_sold_between = []\n index = 0\n start_date = str(year_from) + str(month_from) + str(day_from)\n end_date = str(year_to) + str(month_to) + str(day_to)\n for record in table:\n if end_date > record[-1] > start_date:\n items_sold_between.append(record)\n\n return items_sold_between",
"def fetch_daterange(self, start_date, end_date=None, table='fashion'):\n\n if end_date is None:\n end_date = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')\n\n end_date_obj = datetime.strptime(end_date, '%Y-%m-%d %H:%M:%S')\n end_day = '{:04d}-{:02d}-{:02d}'.format(end_date_obj.year, \n end_date_obj.month, \n end_date_obj.day)\n\n start_date_obj = datetime.strptime(start_date, '%Y-%m-%d %H:%M:%S')\n curr_day = '{:04d}-{:02d}-{:02d}'.format(start_date_obj.year, \n start_date_obj.month, \n start_date_obj.day)\n \n record_lookup_stmt = \"SELECT * FROM {} WHERE date=%s AND t>%s and t<%s\".format(table)\n \n record_list = []\n while curr_day <= end_day: \n record_list += self.session.execute(record_lookup_stmt, [curr_day, \n start_date,\n end_date])\n start_date_obj += timedelta(days=1)\n curr_day = '{:04d}-{:02d}-{:02d}'.format(start_date_obj.year, \n start_date_obj.month, \n start_date_obj.day) \n\n return record_list",
"def date_range(start, end):\n session = Session(engine)\n \n sel = [func.min(measurement.tobs),\n func.max(measurement.tobs),\n func.avg(measurement.tobs)]\n \n range_data = session.query(*sel).\\\n filter(measurement.date >= start).\\\n filter(measurement.date <= end).all()\n \n session.close()\n \n range_x = list(np.ravel(range_data))\n\n return jsonify(range_x)",
"def date_range(start_date, end_date):\n list_dates = []\n for n in range((end_date + timedelta(1) - start_date).days):\n temp_date = start_date + timedelta(n)\n list_dates.append(temp_date.strftime('%Y%m%d'))\n return list_dates",
"def date_range(start_date, end_date):\n list_dates = []\n for n in range((end_date + timedelta(1) - start_date).days):\n temp_date = start_date + timedelta(n)\n list_dates.append(temp_date.strftime('%Y%m%d'))\n return list_dates",
"def temp_range(start_date, end_date):\n \"\"\"for dates between the start and end date inclusive.\"\"\"\n results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()\n\n # Convert list of tuples into normal list\n startend = list(np.ravel(results))\n\n return jsonify(startend)",
"def getPurchaseDates(self):\n\t\treturn self.dateList",
"def get_daily_list(context, data_dict):\n # noinspection PyUnresolvedReferences\n\n output = []\n start_date_str = _get_or_bust(data_dict, 'startDate')\n try:\n dt.strptime(start_date_str, '%Y-%m-%d')\n except ValueError:\n raise _ValidationError(\n 'startDate \\'{0}\\' not in YYYY-MM-DD format'.format(start_date_str)\n )\n start_date = parse(start_date_str,\n default=default_release_date).astimezone(gettz('UTC'))\n\n if 'endDate' in data_dict:\n end_date_str = data_dict['endDate']\n try:\n dt.strptime(end_date_str, '%Y-%m-%d')\n except ValueError:\n raise _ValidationError(\n 'endDate \\'{0}\\' not in YYYY-MM-DD format'.format(end_date_str)\n )\n end_date = parse(end_date_str,\n default=default_release_date).astimezone(gettz('UTC'))\n days = (end_date - start_date).days + 1\n if days < 1:\n raise _ValidationError(_(\n 'endDate \\'{0}\\' must be greater '\n 'than startDate \\'{1}\\''.format(\n end_date_str,\n start_date_str\n )\n ))\n else:\n days = 1\n\n for day in range(days):\n single_date = (start_date + datetime.timedelta(days=day))\n single_date_str = single_date.replace(tzinfo=None).isoformat()\n q = {\n 'q': (\n 'product_type_code:24 AND '\n 'last_release_date:\"{release_date}Z\"'.format(\n release_date=single_date_str\n )\n )\n }\n\n results = _get_action('package_search')(context, q)\n\n count = results['count']\n if count > 1:\n raise _ValidationError(\n 'More than one Daily for date \\'{0}\\''.format(single_date_str)\n )\n\n for result in results['results']:\n children = []\n\n for child in result.get('child_list', []):\n children.append(\n get_product(context, {\n 'productId': child\n })\n )\n\n result['children'] = children\n output.append(result)\n\n return output",
"def get_items_sold_between(table, month_from, day_from, year_from, month_to, day_to, year_to):\n\n min_date = common.dtime(year_from, month_from, day_from)\n max_date = common.dtime(year_to, month_to, day_to)\n\n return [[line[ID], line[TITLE], int(line[PRICE]), int(line[MONTH]), int(line[DAY]), int(line[YEAR])]\n for line in table if min_date < common.dtime(line[YEAR], line[MONTH], line[DAY]) < max_date]",
"def range_date():\n # Query all stations within a certain range\n data = [Measurement.date, func.max(Measurement.tobs), func.min(Measurement.tobs), func.avg(Measurement.tobs)]\n qry = session.query(*data).filter(Measurement.date.between('2014-01-17', '2017-01-01')).all()\n before_date = list(np.ravel(qry))\n\n return jsonify(before_date)",
"def create_date_list(start_date = start_date, end_date = end_date):",
"def list(self, request):\n currentYear = datetime.now().year\n expenses = Expenses.objects.filter(\n date_purchased__contains=currentYear)\n serializer = ExpenseSerializer(\n expenses, many=True, context={'request': request})\n return Response(serializer.data)",
"def find_by_date():\n\n input_date = request.args.get('date')\n \n user_id = session['user']\n user_inv = (UserInv.query.filter_by(user_id=user_id)).all()\n\n inv_by_date = []\n\n for item in user_inv: \n if str(item.inv.date_of_investment) == input_date:\n inv_by_date.append({\"company\": item.inv.company_name, \n \"quantity\": item.inv.quantity, \n \"cost\": item.inv.cost})\n print inv_by_date\n\n return jsonify(inv_by_date)",
"def getListIngr(cls):\n\n # meals = Meals.getMealsByFutureDate(user=session['User'])\n list_ingr = db.session.query(RecipeIngredient).join(Recipe).join(Meals).\\\n join(Ingredient).\\\n filter(func.substr(Meals.date_planned,0,11) >= func.substr(datetime.today(),0,11)).\\\n filter(Meals.recipe_fk==Recipe.recipe_id).\\\n filter(Recipe.recipe_id==RecipeIngredient.recipe_fk).\\\n filter(RecipeIngredient.ingredient_name==Ingredient.name).\\\n filter(Meals.user_fk==session['User']).\\\n order_by(Meals.date_planned).all()\n\n return list_ingr",
"def list_inventory():\n res = {}\n offers = Offer.query.order_by(Offer.offer_id).all()\n for offer in offers:\n res[offer.offer_id] = {'total': count_total_goods(offer.offer_id), 'unallocated': count_available_goods(offer.offer_id)}\n return res",
"def planets_in_range(self):\n\n query_string = \"SELECT * from planets_in_range;\"\n\n # Perform query\n self.conn_cur.execute(query_string)\n results = self.conn_cur.fetchall()\n\n # Build dictionary\n ranges = {}\n for row in results:\n ranges[row[0]] = row[1]\n\n return ranges",
"def get_ride_request(startDate, endDate):\n\n results_list = []\n\n session = DB_SESSION()\n\n results = []\n\n results = session.query(Request).filter(Request.date_created >= startDate, Request.date_created <=endDate)\n\n for result in results:\n results_list.append(result.to_dict())\n print(result.to_dict())\n\n session.close()\n\n return results_list, 200",
"def zenith_range_dates(list_dates, timeframe):\r\n\r\n\tzeniths = []\r\n\r\n\tfor date in list_dates:\r\n\t\tsolar_noon = l.solar_noon(date=date, local=True)\r\n\t\tsolar_zenith = l.solar_elevation(solar_noon.replace(tzinfo=None))\r\n\t\tzeniths.append(solar_zenith)\r\n\r\n\tlist_dates = [date.isoformat() for date in list_dates]\r\n\r\n\tif timeframe == 'last_seven_days' or timeframe == 'this_month' or timeframe == 'last_month':\r\n\t\tformat = 'M/D'\r\n\telif timeframe == 'this_year' or timeframe == 'last_year':\r\n\t\tformat = 'MMM D'\r\n\r\n\treturn {'labels': list_dates, 'data': zeniths, 'yAxisLabel': 'Solar Zenith', 'format': format}",
"def visitRange(self, date):\n raise NotImplementedError()",
"def _filter_by_date(from_date, until_date):\n qlist = []\n\n if from_date:\n qlist.append(Q(oai_date_stamp__gte=from_date))\n\n if until_date:\n qlist.append(Q(oai_date_stamp__lte=until_date))\n\n return qlist"
] |
[
"0.5880754",
"0.55358046",
"0.55146176",
"0.54974526",
"0.54716426",
"0.54641354",
"0.5431443",
"0.5413283",
"0.5400029",
"0.53998613",
"0.5383372",
"0.5372556",
"0.5365455",
"0.5363467",
"0.5363467",
"0.5358362",
"0.5348541",
"0.53345364",
"0.5328369",
"0.53199214",
"0.530684",
"0.5281139",
"0.5279798",
"0.52777106",
"0.5256574",
"0.51891476",
"0.5188826",
"0.5152989",
"0.51402503",
"0.5125461"
] |
0.71391815
|
0
|
Best Path Heuristic (consistent) (seems to be a very good heuristic). The relaxation gives the roomba the ability to pass through walls and to ignore the additional cost of carpet.
1. Find which dirty tile is best to start from. For each dirty tile in state.dirty_locations:
   1.1 Set it as the start node.
   1.2 Use Total Manhattan Distance (third heuristic) to estimate the least-cost route that visits every other dirty tile.
   1.3 Compare with the previous start tile and keep the better start (tiebreak with roomba proximity to the start tile).
2. Find the roomba's proximity to the best start tile.
3. Add the results of steps 1 and 2.
The heuristic is the sum of the distance to the best start tile and the estimated cost from that tile.
|
def spotlessroomba_second_heuristic(state : SpotlessRoombaState) -> float:
    # A nontrivial consistent heuristic: greedy nearest-neighbour tour cost from the best starting
    # dirty tile, plus the roomba's Manhattan distance to that tile.
if not state.dirty_locations:
return 0
best_start = 0 # best dirty tile to start from
best_cost = INF # cost of the path from the above start tile
for i in range(len(state.dirty_locations)):
estimate_cost = 0
lowest_cost = INF
closest_dirty = 0
dirty_locations = list(state.dirty_locations)
current_pos = dirty_locations.pop(i)
# find the shortest cost solution path from this starting tile
while dirty_locations:
for j in range(len(dirty_locations)):
manhattan = abs(current_pos.row - dirty_locations[j].row) + abs(current_pos.col - dirty_locations[j].col)
if manhattan < lowest_cost:
lowest_cost = manhattan
closest_dirty = j
estimate_cost += lowest_cost
current_pos = dirty_locations.pop(closest_dirty)
lowest_cost = INF
# if estimated path cost is cheaper than best path cost so far, replace best_cost and best_start
if estimate_cost < best_cost:
best_cost = estimate_cost
best_start = i
        # if the estimated path cost ties the best path cost so far, tiebreak with roomba proximity to the start tile
        elif estimate_cost == best_cost:
current_pos = state.position
dist_to_prev_best = abs(current_pos.row - state.dirty_locations[best_start].row) + abs(current_pos.col - state.dirty_locations[best_start].col)
dist_to_i = abs(current_pos.row - state.dirty_locations[i].row) + abs(current_pos.col - state.dirty_locations[i].col)
if dist_to_i < dist_to_prev_best:
best_start = i
current_pos = state.position
# Calculate distance to the best start tile
dist_to_start = abs(current_pos.row - state.dirty_locations[best_start].row) + abs(current_pos.col - state.dirty_locations[best_start].col)
# Returned heuristic is the sum of distance to the start tile and estimated cost from said tile
return dist_to_start + best_cost
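
A minimal usage sketch, purely illustrative and not part of the assignment: Coordinate, FakeState and the commented-out import path below are assumptions, and the real SpotlessRoombaState plus the module-level INF are taken to come from the course's roomba problem module. The stub only mirrors the attributes the heuristic actually reads (.position, .dirty_locations, and their .row/.col fields).

# Minimal usage sketch (assumptions only; INF = float('inf') and the heuristic itself
# are assumed to already be in scope, e.g. defined above in the same module).
from collections import namedtuple

# from spotlessroomba_heuristics import spotlessroomba_second_heuristic  # hypothetical import path

Coordinate = namedtuple("Coordinate", ["row", "col"])  # stand-in for the course's coordinate type

class FakeState:
    """Mirrors only the attributes the heuristic reads."""
    def __init__(self, position, dirty_locations):
        self.position = position
        self.dirty_locations = tuple(dirty_locations)

state = FakeState(Coordinate(2, 0),
                  [Coordinate(0, 0), Coordinate(0, 3), Coordinate(4, 3)])
# Greedy tour starting at (0,0): (0,0)->(0,3) costs 3, then (0,3)->(4,3) costs 4, total 7.
# The roomba at (2,0) is 2 away from (0,0), so the heuristic value is 2 + 7 = 9.
print(spotlessroomba_second_heuristic(state))  # expected: 9

Under these assumptions the tie between starting at (0,0) and at (4,3), whose greedy tours both cost 7, is broken in favour of (0,0) because it is closer to the roomba.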
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_heuristic(self, state):\n\n def get_manhattan_distance(coord_a, coord_b):\n \"\"\"Returns the manhattan distance between coord_a and coord_b.\"\"\"\n return abs(coord_a.x - coord_b.x) + abs(coord_a.y - coord_b.y)\n\n \n def get_num_obstacles(coord_a, coord_b):\n \"\"\"Returns the number of obstacles (wriggler segments or walls) between\n coord_a and coord_b.\n \n This function assumes that coord_b is larger (in either/both x and y)\n than coord_a.\n \"\"\"\n obstacle_count = 0\n \n for x in range(coord_a.x, coord_b.x + 1):\n for y in range(coord_a.y, coord_b.y + 1):\n coord = Coordinate(x, y)\n if coord in self.wall_coords or coord in state:\n obstacle_count += 1\n \n return obstacle_count\n\n\n head_coord = state.wriggler_list[0].get_head()\n tail_coord = state.wriggler_list[0].get_tail()\n \n head_manhattan_distance = get_manhattan_distance(head_coord, self.goal_coord)\n tail_manhattan_distance = get_manhattan_distance(tail_coord, self.goal_coord)\n \n # Calculate and return heuristic value depending on which heuristic to use\n if self.heuristic == Heuristic.MANHATTAN_DIST:\n # Return the shortest Manhattan distance of wriggler0's tail or head to the goal\n return min(head_manhattan_distance, tail_manhattan_distance)\n \n else: # self.heuristic == Heuristic.NUM_OBSTACLES:\n # Return the number of obstacles between wriggler0's tail/head to the goal\n # The tail/head is selected based on which is closer to the goal\n if head_manhattan_distance <= tail_manhattan_distance:\n # The head is closer or the same distance away\n return get_num_obstacles(head_coord, self.goal_coord)\n \n else:\n # The tail is closer\n return get_num_obstacles(tail_coord, self.goal_coord)",
"def spotlessroomba_third_heuristic(state : SpotlessRoombaState) -> float:\n h = 0\n current_position = state.position\n dirty_locations = list(state.dirty_locations)\n partial_heuristic = INF\n closest_dirty = 0\n\n while dirty_locations:\n for i in range(len(dirty_locations)):\n manhattan = abs(current_position.row - dirty_locations[i].row) + abs(current_position.col - dirty_locations[i].col)\n if manhattan < partial_heuristic:\n partial_heuristic = manhattan\n closest_dirty = i\n h += partial_heuristic\n current_position = dirty_locations.pop(closest_dirty)\n partial_heuristic = INF\n \n return h",
"def spotlessroomba_first_heuristic(state : SpotlessRoombaState) -> float:\n # TODO a nontrivial admissible heuristic\n return len(state.dirty_locations)",
"def extra(maze):\n # TODO: Write your code here\n heuristic_lookup = {} \n objs = maze.getObjectives()\n corner_list = maze.getObjectives()\n start = maze.getStart()\n path = []\n dim = maze.getDimensions()\n visited = {}\n lookup_table = {}\n p_queue = []\n edgeset = []\n mintree = {}\n start_heuristic = 0 + multi_dot_heuristic_query(maze, start, objs, edgeset, mintree) * 2\n heuristic_lookup[(start, tuple(objs))] = start_heuristic\n start_state = state(start, corner_list)\n lookup_table[state(start, corner_list)] = (start_heuristic, 0, state((-2, -2)))\n p_queue.append((start_heuristic, state(start, corner_list)))\n while p_queue:\n pair = p_queue.pop(0)\n visited[pair[1]] = lookup_table.get(pair[1])[2]\n if not pair[1].getlist():\n current_state = pair[1]\n while current_state != start_state:\n path.append(current_state.getpos())\n current_state = visited.get(current_state)\n path.append(start)\n path.reverse()\n return path\n else: \n list_of_neighbors = maze.getNeighbors(pair[1].getpos()[0], pair[1].getpos()[1])\n for coordinates in list_of_neighbors:\n current_state = state(coordinates)\n if coordinates in pair[1].getlist():\n new_list = copy.copy(pair[1].getlist())\n new_list.remove(coordinates)\n current_state = state(coordinates, new_list)\n else:\n current_state = state(coordinates, pair[1].getlist()) \n if current_state in visited:\n continue\n if current_state in lookup_table:\n if (lookup_table.get(current_state)[0], current_state) in p_queue:\n cost = lookup_table.get(pair[1])[1] + 1\n queried_heuristic = 0\n if (current_state.getpos(), tuple(current_state.getlist())) in heuristic_lookup:\n queried_heuristic = heuristic_lookup.get((current_state.getpos(), tuple(current_state.getlist())))\n else:\n queried_heuristic = multi_dot_heuristic_query(maze, current_state.getpos(), current_state.getlist(), edgeset, mintree) * 2\n heuristic_lookup[(current_state.getpos(), tuple(current_state.getlist()))] = queried_heuristic\n heuristic = queried_heuristic + cost\n old_heuristic = lookup_table.get(current_state)[0]\n if heuristic < lookup_table.get(current_state)[0]:\n lookup_table[current_state] = (heuristic, cost, pair[1])\n p_queue.remove((old_heuristic, current_state))\n bisect.insort(p_queue, (heuristic, current_state))\n else:\n cost = lookup_table.get(pair[1])[1] + 1\n queried_heuristic = 0\n if (current_state.getpos(), tuple(current_state.getlist())) in heuristic_lookup:\n queried_heuristic = heuristic_lookup.get((current_state.getpos(), tuple(current_state.getlist()))) \n else:\n queried_heuristic = multi_dot_heuristic_query(maze, current_state.getpos(), current_state.getlist(), edgeset, mintree) * 2\n heuristic_lookup[(current_state.getpos(), tuple(current_state.getlist()))] = queried_heuristic\n heuristic = queried_heuristic + cost\n lookup_table[current_state] = (heuristic, cost, pair[1])\n bisect.insort(p_queue, (heuristic, current_state))\n\n return []",
"def foodHeuristic(state, problem):\n position, foodGrid = state\n \"*** YOUR CODE HERE ***\"\n \"\"\"\n Mi heurística consiste en hacer simplemente el máximo de las distancias reales del state a cada nodo con comida\n He provado diferentes heurísticas y esta es la que me expande menos nodos, aunque no es la más óptima temporalmente\n Tardé mucho tiempo en darme cuenta de que había una función que calculaba la distancia real entre dos nodos\n NOTA: NO EJECUTAR CON LABERINTOS MÁS GRANDES QUE EL tinySearch. El algoritmo requiere muchísimo tiempo\n \"\"\"\n max = 0 # Inicializo el máximo en 0\n for food in foodGrid.asList(): # Esto me da cada food como un nodo (x,y), pero sólo los nodos que tengan comida\n distance = mazeDistance(position, food, problem.startingGameState) # Distancia real del state a una comida\n if max < distance: # Cálculo del máximo\n max = distance\n return max\n\n # La siguiente heurística también servía, y de hecho tardaba mucho menos, pero el autograder me daba 2/4\n # ya que se expandían más de 12.000 nodos.\n # return len(foodGrid.asList())",
"def astar_multi(maze):\n heuristic_lookup = {} \n objs = maze.getObjectives()\n corner_list = maze.getObjectives()\n start = maze.getStart()\n path = []\n dim = maze.getDimensions()\n visited = {}\n lookup_table = {}\n p_queue = []\n edgeset = []\n mintree = {}\n start_heuristic = 0 + multi_dot_heuristic_query(maze, start, objs, edgeset, mintree) \n heuristic_lookup[(start, tuple(objs))] = start_heuristic\n start_state = state(start, corner_list)\n lookup_table[state(start, corner_list)] = (start_heuristic, 0, state((-2, -2)))\n p_queue.append((start_heuristic, state(start, corner_list)))\n while p_queue:\n pair = p_queue.pop(0)\n visited[pair[1]] = lookup_table.get(pair[1])[2]\n if not pair[1].getlist():\n current_state = pair[1]\n while current_state != start_state:\n path.append(current_state.getpos())\n current_state = visited.get(current_state)\n path.append(start)\n path.reverse()\n return path\n else: \n list_of_neighbors = maze.getNeighbors(pair[1].getpos()[0], pair[1].getpos()[1])\n for coordinates in list_of_neighbors:\n current_state = state(coordinates)\n if coordinates in pair[1].getlist():\n new_list = copy.copy(pair[1].getlist())\n new_list.remove(coordinates)\n current_state = state(coordinates, new_list)\n else:\n current_state = state(coordinates, pair[1].getlist()) \n if current_state in visited:\n continue\n if current_state in lookup_table:\n if (lookup_table.get(current_state)[0], current_state) in p_queue:\n cost = lookup_table.get(pair[1])[1] + 1\n queried_heuristic = 0\n if (current_state.getpos(), tuple(current_state.getlist())) in heuristic_lookup:\n queried_heuristic = heuristic_lookup.get((current_state.getpos(), tuple(current_state.getlist())))\n else:\n queried_heuristic = multi_dot_heuristic_query(maze, current_state.getpos(), current_state.getlist(), edgeset, mintree)\n heuristic_lookup[(current_state.getpos(), tuple(current_state.getlist()))] = queried_heuristic\n heuristic = queried_heuristic + cost\n old_heuristic = lookup_table.get(current_state)[0]\n if heuristic < lookup_table.get(current_state)[0]:\n lookup_table[current_state] = (heuristic, cost, pair[1])\n p_queue.remove((old_heuristic, current_state))\n bisect.insort(p_queue, (heuristic, current_state))\n else:\n cost = lookup_table.get(pair[1])[1] + 1\n queried_heuristic = 0\n if (current_state.getpos(), tuple(current_state.getlist())) in heuristic_lookup:\n queried_heuristic = heuristic_lookup.get((current_state.getpos(), tuple(current_state.getlist())))\n else:\n queried_heuristic = multi_dot_heuristic_query(maze, current_state.getpos(), current_state.getlist(), edgeset, mintree) \n heuristic_lookup[(current_state.getpos(), tuple(current_state.getlist()))] = queried_heuristic\n heuristic = queried_heuristic + cost\n lookup_table[current_state] = (heuristic, cost, pair[1])\n bisect.insort(p_queue, (heuristic, current_state))\n return []",
"def foodHeuristic(state, problem):\n import itertools\n\n\n\n def manhattan(startPosition, targetPosition):\n xy1 = startPosition\n xy2 = targetPosition\n return abs(xy1[0] - xy2[0]) + abs(xy1[1] - xy2[1])\n\n position, foodGrid = state\n\n return len(foodGrid.asList())\n #\n # \"\"\"\n # The below algorithm is from:\n # https://stackoverflow.com/questions/9994913/pacman-what-kinds-of-heuristics-are-mainly-used\n #\n # Find real/manhattan distance between two currently furthest fruits in labyrinth - let's call that x.\n # Find real/manhattan distance from current Pacman position to the closer of previous two fruits - let's call that y.\n # Then, answer is just: x + y.\n # The interpretation of this x + y formula could be something like this:\n #\n # x - either way, you will have to travel this distance, at least at the end\n # y - while you are at the some of the two furthest fruits, it's better to collect\n # the food that is near to it so you don't have to go back\n # \"\"\"\n # maxFoodPairDistance = 0\n #\n # if len(foodGrid.asList()) >= 2:\n #\n # #calculate manhattan/real distance between each pair of food (all permutations in foodGrid) and find the maximum of them, and\n # #store the pair with max distance in maxFoodPair\n # for foodPair in itertools.permutations(foodGrid.asList(),2):\n # #foodPairDistance = mazeDistance(foodPair[0], foodPair[1], problem.startingGameState)\n # foodPairDistance = manhattan(foodPair[0], foodPair[1])\n # if foodPairDistance >= maxFoodPairDistance:\n # maxFoodPairDistance = foodPairDistance\n # maxFoodPair = foodPair\n #\n # #get the real distance between pacman and nearest food among the max distance food pair we get above. Using real distance instead\n # #of manhattan distance here just to \"reduce\" the number of nodes expand to get additional point. But that's a bit of a cheating\n # #because the mazeDistance function use of breadth First search - which itself is a search with nodes expansion not counted here\n # #minPacmanToFoodDistance = min([mazeDistance(position, foodPosition, problem.startingGameState) for foodPosition in maxFoodPair])\n # minPacmanToFoodDistance = min([manhattan(position, foodPosition) for foodPosition in maxFoodPair])\n #\n # #When only one food left, just return the real distance between pacman and food\n # elif len(foodGrid.asList()) == 1:\n # foodPosition = foodGrid.asList()[0]\n # #minPacmanToFoodDistance = mazeDistance(position, foodPosition, problem.startingGameState)\n # minPacmanToFoodDistance = manhattan(position, foodPosition)\n # else:\n # minPacmanToFoodDistance = 0\n #\n # return minPacmanToFoodDistance + maxFoodPairDistance",
"def registerInitialState(self, gameState):\n\n # stuff\n self.treeDepth = 4\n self.oldFood = []\n self.lastEatenFood = None\n self.i = 0\n\n\n\n #oldFood\n self.oldFood = self.getFoodYouAreDefending(gameState)\n\n\n self.red = gameState.isOnRedTeam(self.index)\n self.distancer = distanceCalculator.Distancer(gameState.data.layout)\n\n # comment this out to forgo maze distance computation and use manhattan distances\n self.distancer.getMazeDistances()\n\n\n\n \n\n \n # FIND PATROL POINTS\n\n\n\n x = gameState.data.layout.width/2-8\n #print \"WIDTH \", x+4\n\n y1 = gameState.data.layout.height-4\n y2 = 0+4\n\n\n\n point1 = (x,y2)\n point2 = (x,y1)\n topPoints = []\n botPoints = []\n for i in range(0,6):\n xv = x+i\n if not gameState.data.layout.walls[xv][y1]:\n\n newBP = (xv, y1)\n botPoints.append(newBP)\n else:\n newBP = (xv, y1)\n #print newBP, \" in wall\"\n\n if not gameState.data.layout.walls[xv][y2]:\n newTP = (xv, y2)\n topPoints.append(newTP)\n else:\n newTP = (xv, y2)\n #print newTP, \" in wall\"\n\n\n\n\n\n # FIND PATROL POINTS WITH THE SHORTEST PATH\n bestTP = topPoints[0]\n bestBP = botPoints[0]\n\n bestPath = self.getMazeDistance(bestTP,bestBP)\n for tp in topPoints:\n bp = min(botPoints, key=lambda p: self.getMazeDistance(tp, p))\n tempPath = self.getMazeDistance(tp, bp)\n if (tempPath < bestPath):\n bestTP = tp\n bestBP = bp\n bestPath = tempPath\n\n #print \"THE REAL BEST POINTS: \", bestBP, \" \", bestTP, \" \", bestPath\n\n self.patrolPoints = [bestTP,bestBP]\n\n\n\n\n\n\n import __main__\n if '_display' in dir(__main__):\n self.display = __main__._display",
"def astar_corner(maze):\n # TODO: Write your code here\n \"\"\"\n Plan:\n Do normal a* but then .clear visited after each new goal is found\n new h = Manhattan distance to the nearest goal and then the manhattan distance to the other goals starting from this nearest goal. \n new priority queue -- tuple (f, x&y, goals_left, \n \"\"\"\n pq = []\n visited = {}\n\n goals = maze.getObjectives()\n start = maze.getStart()\n\n tie = 1\n #\n # tuple = (f,g,h,x&y,tiebreaker, goals left, currpath, visited)\n f = min_manhattan(goals, start)\n curr = (f, 0, f, start, goals, 0, [])\n heapq.heappush(pq, curr)\n\n food = None\n while len(pq) > 0:\n curr = heapq.heappop(pq)\n #print(\"curr:\", curr)\n if curr[3] in curr[4]:\n curr[4].remove(curr[3])\n if len(curr[4]) == 0:\n #print(\"DONE\")\n #print(food)\n food = curr\n break\n neighbors = maze.getNeighbors(curr[3][0], curr[3][1])\n for n in neighbors:\n curr_goals_left = curr[4].copy()\n curr_visited = curr[6].copy()\n tie += 1\n #print(\"curr[6]: \", curr[6])\n #print(\"n: \", n)\n #print(\"curr[4]: \", curr[4])\n h2 = min_manhattan(curr[4], n)\n f2 = h2 + curr[1]\n g2 = curr[1] + 1\n\n node_new = (f2, g2, h2, n, curr_goals_left, tie, curr_visited)\n \n if node_new[3] not in visited or node_new[4] not in visited[node_new[3]][1]:\n if node_new[3] not in visited:\n visited[node_new[3]] = (node_new[3], [])\n visited[node_new[3]][1].append(node_new[4])\n node_new[6].append(curr[3])\n heapq.heappush(pq, node_new)\n\n if food is None:\n return []\n\n food[6].append(food[3])\n\n return food[6]",
"def cornersHeuristic(state, problem):\n\n # Useful information.\n # corners = problem.corners # These are the corner coordinates\n # walls = problem.walls # These are the walls of the maze, as a Grid.\n\n # *** Your Code Here ***\n corners = problem.corners # These are the corner coordinates\n # walls = problem.walls # These are the walls of the maze, as a Grid.\n\n # Get unvisited corners\n successor = [False, False, False, False]\n currentPosition = state[0]\n currentStatus = state[1]\n\n # Take the manhattan distance of the nodes\n # current position and all corners tuple location\n # Iterate through all corners\n for corner in range(len(corners)):\n successor[corner] = distance.manhattan(currentPosition,\n corners[corner]) * (not currentStatus[corner]) # Ignore corners already visited\n return max(successor) # Return the max value from all calculated manhattan values of all corner",
"def astar(grid, heuristic):\r\n evaluatedMap = {}\r\n unevaluatedMap = {}\r\n start = grid.getStart()\r\n goal = grid.getGoals()[0]\r\n startG = 0\r\n startH = heuristic(start,goal)\r\n currentNode = Node(start,startH,startG)\r\n unevaluatedMap[currentNode.coord] = currentNode\r\n \r\n while len(unevaluatedMap) > 0:\r\n # I tried using a PriorityQueue but because a node could end up with \r\n # an updated priority it really didn't make sense to use one and\r\n # instead had to just serach the dictionary each time for the smallest\r\n # priority which is the sum of g and h\r\n currentNode = min(unevaluatedMap.values(),key=lambda x:x.g + x.h)\r\n \r\n # if the current node is the goal then create the path by iterating backwards\r\n # and pushing the current node to the front of the path and then moving to the\r\n # parent node\r\n if currentNode.coord == goal:\r\n path = []\r\n while currentNode.parentNode:\r\n path.insert(0,currentNode.coord)\r\n currentNode = currentNode.parentNode\r\n path.insert(0,currentNode.coord)\r\n grid.setPath(path)\r\n return\r\n \r\n # Move the current node to the evaluated map and delete it from\r\n # the unevaluated map\r\n evaluatedMap[currentNode.coord] = currentNode\r\n del unevaluatedMap[currentNode.coord]\r\n \r\n # Mark the current node as having been visited\r\n grid.addVisited(currentNode.coord)\r\n \r\n # Get the neighbors of the current node\r\n neighbors = grid.getNeighbors(currentNode.coord)\r\n\r\n # For each neighbor check if that neighbor has alread been evaluated\r\n # if it has then skip that neighbor. If it hasn't and it isn't in the\r\n # unevaluated map add it with a high cost and heuristic.\r\n # Get the neighbor from the unevaluated map and calculate the current\r\n # cost. If the current cost is less than what existed update the neighbor\r\n # and add it back to the list otherwise skip to next neighbor\r\n for neighbor in neighbors:\r\n ncoord = (neighbor[0])\r\n if (ncoord) in evaluatedMap:\r\n continue\r\n if (ncoord) not in unevaluatedMap:\r\n node = Node(ncoord,float('inf'),float('inf'))\r\n unevaluatedMap[ncoord] = node\r\n \r\n node = unevaluatedMap[ncoord]\r\n calc_cost = currentNode.g + neighbor[1]\r\n if calc_cost >= node.g:\r\n continue\r\n \r\n node.parentNode = currentNode\r\n node.g = calc_cost\r\n node.h = heuristic(ncoord,goal)",
"def lazy_a_star(agent):\n h1 = manhattan_heuristics\n h2 = search.straight_line_heursitic\n expanded_nodes.clear()\n\n # convert from numpy to regulat list, heappush has problems with numpy\n start_pos = (agent.start[0], agent.start[1])\n goal_pos = (agent.goal[0], agent.goal[1])\n current_pos = start_pos\n\n # initialization\n print(\"\\nCoordinate Configuration: (Y, X)\")\n print(\"Start State:\", start_pos)\n print(\"Goal State:\", goal_pos, \"\\n\")\n\n open_list = PQueue()\n closed_list = dict()\n root = {'loc': start_pos, 'g_val': 0,'h2_applied': False, 'h_val': h1(start_pos, goal_pos), 'parent': None}\n \n open_list.put(root, compare_lazyA)\n #push_node(open_list, root)\n closed_list[(root['loc'])] = root\n\n nodes_expanded = 0\n max_size_of_open = len(open_list.elements)\n while len(open_list.elements) > 0:\n # nodes_expanded += 1\n if len(open_list.elements) > max_size_of_open: # space complexity\n max_size_of_open = len(open_list.elements)\n\n node = open_list.get() #pop_node(open_list)\n if node['h2_applied'] == False:\n nodes_expanded += 1 # time complexity\n \n \n expanded_nodes.append(node['loc'])\n current_pos = node['loc']\n agent.current[0] = current_pos[0]\n agent.current[1] = current_pos[1]\n\n # path to goal state has been found\n if (node['loc'][0] == agent.goal[0] and node['loc'][1] == agent.goal[1]):\n print(\"SOLUTION FOUND!\")\n print(\"NODES EXPANDED:\", nodes_expanded)\n print(\"MAX SIZE OF OPEN_LIST:\", max_size_of_open)\n return get_path(node), expanded_nodes\n \n if node['h2_applied'] == False:\n if h1(node['loc'], goal_pos) < h2(node['loc'], goal_pos):\n node['h_val'] = h2(node['loc'], goal_pos)\n node['h2_applied'] = True\n open_list.put(node, compare_lazyA)\n else:\n \n # take movement option indices in agentBase.nextStep()...\n # map out viable indices to locations in map\n move_options = agent.nextStep()\n move_list =[]\n \n for i in range(len(move_options)):\n if move_options[i] == 1:\n move_list.append((node['loc'][0], node['loc'][1]+1))\n if move_options[i] == 2:\n move_list.append((node['loc'][0]+1, node['loc'][1]))\n if move_options[i] == 3:\n move_list.append((node['loc'][0], node['loc'][1]-1))\n if move_options[i] == 4: \n move_list.append((node['loc'][0]-1, node['loc'][1]))\n \n # end of for in loop\n \n # for valid locations, create movement child\n for move in move_list:\n child = {'loc': move,\n 'h2_applied': False,\n 'g_val': node['g_val'] + 1,\n 'h_val': h1(move, goal_pos),\n 'parent': node}\n if not (child['loc']) in closed_list: # pruning\n \n \n closed_list[(child['loc'])] = child\n #push_node(open_list, child)\n open_list.put(child, compare_lazyA)\n # end of for in loop\n\n # end of while\n return None # Failed to find solutions",
"def registerInitialState(self, gameState):\r\n \r\n '''\r\n Make sure you do not delete the following line. If you would like to\r\n use Manhattan distances instead of maze distances in order to save\r\n on initialization time, please take a look at\r\n CaptureAgent.registerInitialState in captureAgents.py.\r\n '''\r\n CaptureAgent.registerInitialState(self, gameState)\r\n \r\n \r\n self.teamMates = []\r\n for mate in self.getTeam(gameState):\r\n if mate is not self.index:\r\n self.teamMates.append(mate)\r\n \r\n def getSuccessors(walls, state):\r\n successors = []\r\n for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:\r\n x,y = state\r\n dx, dy = Actions.directionToVector(action)\r\n nextx, nexty = int(x + dx), int(y + dy)\r\n if not walls[nextx][nexty]:\r\n nextState = (nextx, nexty)\r\n cost = 1\r\n successors.append( ( nextState, action, cost) )\r\n return successors\r\n \r\n \r\n \r\n class o0State:\r\n def __init__(self, pos, node = None):\r\n self.pos = pos\r\n self.node = node\r\n self.deadEndDepth = 0.0\r\n self.successors = {}\r\n self.successorsByNodePos = {}\r\n def isDeadEndNode(self):\r\n if self.node is None:\r\n return False\r\n noneDeadEndCount = 0\r\n for successor in self.successors.values():\r\n if not successor.isDeadEnd:\r\n noneDeadEndCount += 1\r\n return noneDeadEndCount is 1\r\n class o0Node:\r\n def __init__(self, pos):\r\n self.pos = pos\r\n self.isDeadEnd = False\r\n class o0Successor:\r\n def __init__(self, direction, nextPos, nextNodePos = None):\r\n self.direction = direction\r\n self.nextPos = nextPos\r\n self.nextNodePos = nextNodePos\r\n self.isDeadEnd = False\r\n\r\n class o0PathMap:\r\n def __init__(self, gameState):\r\n #print 'init pathMap'\r\n walls = gameState.getWalls()\r\n positions = walls.asList(False)\r\n self.states = {}\r\n self.nodes = {}\r\n for pos in positions:\r\n self.states[pos] = o0State(pos)\r\n for successor in getSuccessors(walls,pos):\r\n self.states[pos].successors[successor[1]] = o0Successor(successor[1],successor[0])\r\n successorCount = len(self.states[pos].successors)\r\n if successorCount is not 2:\r\n node = o0Node(pos)\r\n self.nodes[pos] = node\r\n self.states[pos].node = node\r\n \r\n def connectNode(node):\r\n for nodeSuccessor in self.states[node.pos].successors.values():\r\n if nodeSuccessor.nextNodePos is None:\r\n forwardSuccessors = [nodeSuccessor]\r\n backwardSuccessors = []\r\n previousPos = node.pos\r\n currentPos = nodeSuccessor.nextPos\r\n while currentPos not in self.nodes.keys():\r\n #print node.pos\r\n #print currentPos\r\n if len(self.states[currentPos].successors) is not 2:\r\n print 'not a path'\r\n for successor in self.states[currentPos].successors.values():\r\n #print successor.nextPos\r\n if successor.nextPos[0] is previousPos[0] and successor.nextPos[1] is previousPos[1]:\r\n backwardSuccessors.append(successor)\r\n else:\r\n forwardSuccessors.append(successor)\r\n previousPos = currentPos\r\n currentPos = forwardSuccessors[len(forwardSuccessors) - 1].nextPos\r\n for successor in self.states[currentPos].successors.values():\r\n if successor.nextPos is previousPos:\r\n backwardSuccessors.append(successor)\r\n \r\n for successor in forwardSuccessors:\r\n successor.nextNodePos = currentPos\r\n for successor in backwardSuccessors:\r\n successor.nextNodePos = node.pos\r\n \r\n #connectNode(self.nodes.values()[0])\r\n #connectNode(self.nodes.values()[1])\r\n #connectNode(self.nodes.values()[2])\r\n #connectNode(self.nodes.values()[3])\r\n 
#connectNode(self.nodes.values()[4])\r\n #connectNode(self.nodes.values()[5])\r\n \r\n for node in self.nodes.values():\r\n connectNode(node)#'''\r\n for state in self.states.values():\r\n for successor in self.states[state.pos].successors.values():\r\n self.states[state.pos].successorsByNodePos[successor.nextNodePos] = successor\r\n \r\n updatedNodes = self.nodes.values()\r\n while(len(updatedNodes) is not 0):\r\n nodePool = updatedNodes\r\n updatedNodes = []\r\n for node in nodePool:\r\n if self.states[node.pos].isDeadEndNode():\r\n self.nodes[node.pos].isDeadEnd = True\r\n for successor in self.states[node.pos].successors.values():\r\n self.states[successor.nextNodePos].successorsByNodePos[node.pos].isDeadEnd = True\r\n updatedNodes.append(self.states[successor.nextNodePos])\r\n \r\n #node.isDeadEnd = self.states[node.pos].isDeadEndNode()#'''\r\n \r\n '''\r\n for node in self.nodes.values():\r\n if self.states[node.pos].isDeadEndNode():\r\n node.isDeadEnd = True#'''\r\n \r\n deadEndNodes = {}\r\n noneDeadEndNodes = {}\r\n for node in self.nodes.values():\r\n if not node.isDeadEnd:\r\n noneDeadEndNodes[node.pos] = node\r\n else:\r\n deadEndNodes[node.pos] = node\r\n \r\n for node in deadEndNodes.values():#\r\n actions = breadthFirstSearch(AnyTargetSearchProblem(gameState,noneDeadEndNodes.keys(),node.pos))\r\n nodeConnectedTo = self.nodes[performActions(node.pos, actions)] \r\n actions = reverseActions(actions)\r\n pos = nodeConnectedTo.pos\r\n deadEndDepth = 0.0\r\n for action in actions:\r\n pos = performActions(pos,[action])\r\n deadEndDepth += 1.0\r\n self.states[pos].deadEndDepth = deadEndDepth\r\n def willDie(self, position, distance, scaredTime = 0):#distance from our agent to closest enemy\r\n deadEndDepth = self.states[position].deadEndDepth\r\n if deadEndDepth >= distance - deadEndDepth and deadEndDepth >= scaredTime:\r\n return True\r\n return False\r\n def isDeadEnd(self, position):\r\n return self.states[position].deadEndDepth >= 0.5\r\n #def getAllStatesInDeadEnd(self, anyState):\r\n \r\n\r\n global pathMap\r\n if pathMap is None:\r\n pathMap = o0PathMap(gameState)\r\n self.pathMap = pathMap\r\n targets[self.index] = None\r\n global lastEattenFoodAreDefendingPos\r\n lastEattenFoodAreDefendingPos = None \r\n global totalFood\r\n totalFood = len(self.getFood(gameState).asList())\r\n global leftFood\r\n leftFood = totalFood\r\n #self.debugDraw(pathMap.deadEndNodes.keys(),[1,0,0])\r\n #self.debugDraw(pathMap.nodes.keys(),[0,1,0])\r\n \r\n global pathMapDebugMode\r\n if pathMapDebugMode:\r\n for state in self.pathMap.states.values():\r\n deadEndColor = 0.3 + state.deadEndDepth * 0.1\r\n if deadEndColor>1.0:\r\n deadEndColor = 1.0\r\n if state.deadEndDepth == 0:\r\n deadEndColor = 0.0\r\n \r\n nodeColor = 0.0\r\n if state.node is not None:\r\n nodeColor = 0.5\r\n self.debugDraw(state.pos,[deadEndColor,0,0])\r\n\r\n self.curryFoodScore = 0.8\r\n \r\n \r\n \r\n global defenseWall\r\n global defensePositions\r\n if len(defenseWall) is 0:\r\n foods = self.getFoodYouAreDefending(gameState)\r\n for capsule in self.getCapsulesYouAreDefending(gameState):\r\n foods[capsule[0]][capsule[1]] = True\r\n defenseWall = actionsToPositions((0,0), aStarSearch(DefenseSearchProblem(gameState, foods, self.index),nullHeuristic))\r\n defensePositions = getPositionsNeededToDefense(gameState)\r\n global defenseWallDebugMode\r\n if defenseWallDebugMode is True:\r\n self.debugDraw(defenseWall,[0,0.5,0])\r\n self.debugDraw(defensePositions,[0.5,0,0])\r\n \r\n global agentInDeadEnd\r\n 
agentInDeadEnd[self.index] = False",
"def search(state, goal_state):\n\n def gn(node):\n return node.gn()\n\n tiles_places = []\n for i in range(len(goal_state)):\n for j in range(len(goal_state)):\n heapq.heappush(tiles_places, (goal_state[i][j], (i, j)))\n\n def hn(node):\n cost = 0\n for i in range(len(node.state)):\n for j in range(len(node.state)):\n tile_i, tile_j = tiles_places[node.state[i][j]][1]\n if i != tile_i or j != tile_j:\n cost += abs(tile_i - i) + abs(tile_j - j)\n return cost\n\n def fn(node):\n return gn(node) + hn(node)\n\n return bfs.search(state, goal_state, fn)",
"def a_star_search(problem, heuristic=null_heuristic):\r\n \"*** YOUR CODE HERE ***\"\r\n fringe = util.PriorityQueue()\r\n path = set()\r\n final = []\r\n acts = dict()\r\n state = problem.get_start_state()\r\n fringe.push(state, 0)\r\n\r\n while (True):\r\n state = fringe.pop()\r\n path.add(state)\r\n states = problem.get_successors(state)\r\n acts[state] = states[:]\r\n if problem.is_goal_state(state):\r\n break\r\n\r\n states = problem.get_successors(state)\r\n # push into fringe\r\n for stat in states:\r\n if stat[0] not in path:\r\n \"\"\"\r\n it does worse in corners problems, to work better needs heavy huristic, not worth in\r\n in corners problem expandend nodes grow expo\r\n all others are better\r\n counter = 0 # in some situation it helps, in some it doesnt\r\n #print(stat[0].pieces)\r\n for x in stat[0].pieces[0]:\r\n if x:\r\n counter += 1\r\n \"\"\"\r\n counter = 0\r\n fringe.push(stat[0], stat[2] + counter + heuristic(stat[0], problem)) # problem.get_cost_of_actions([stat[1]])\r\n\r\n while (True):\r\n\r\n for key, val in acts.items():\r\n for va in val:\r\n if va[0] == state:\r\n final.append(va[1])\r\n state = key\r\n break\r\n else:\r\n continue\r\n break\r\n if state == problem.get_start_state():\r\n break\r\n\r\n final.reverse()\r\n\r\n return final",
"def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n # Initialize data structures\n parent_node = {}\n path_to_node = {}\n priority_queue = util.PriorityQueue()\n\n p_c = 0.5\n h_c = 1 - p_c\n\n # Get the start node\n start_node = problem.getStartState()\n parent_node[start_node] = None\n path_to_node[start_node] = []\n priority_queue.update(start_node, 0)\n\n #goal_found = False\n\n while not priority_queue.isEmpty():\n # Get the next node\n node_to_expand = priority_queue.pop()\n # Check if goal state is reached\n if problem.isGoalState(node_to_expand):\n break\n next_nodes = problem.getSuccessors(node_to_expand)\n path_to_parent = path_to_node[node_to_expand]\n\n for one_node in next_nodes:\n point, move, cost = one_node\n curr_path = path_to_node[node_to_expand] + [move]\n curr_cost = problem.getCostOfActions(curr_path)\n heuristic_cost = heuristic(point, problem)\n # Check if current node already exists in the previously visited nodes\n if point in path_to_node:\n prev_cost = problem.getCostOfActions(path_to_node[point])\n if prev_cost > curr_cost:\n path_to_node[point] = curr_path\n priority_queue.update(point, curr_cost + heuristic_cost)\n \n else:\n path_to_node[point] = curr_path\n priority_queue.update(point, curr_cost + heuristic_cost)\n \n # current_cost = problem.getCostOfActions(point) * p_c + heuristic(point, problem) * h_c\n\n print(node_to_expand) \n return path_to_node[node_to_expand]\n \n# nodes_to_expand = set()\n# # get max value node in the fringe node\n# min_val = float(\"inf\")\n# for one_node in fringe_node:\n# # Compute the cost to reach a node\n# total_cost = cost_to_point[one_node] * p_c + heuristic(one_node,problem) * h_c\n# if total_cost < min_val:\n# min_val = total_cost\n# \n# for one_node in fringe_node:\n# # Compute the cost to reach a node\n# total_cost = cost_to_point[one_node] * p_c + heuristic(one_node,problem) * h_c\n# if total_cost == min_val:\n# nodes_to_expand.add(one_node)\n# fringe_node.remove(one_node)\n#\n# # Expand the fringe node \n# for one_node in nodes_to_expand:\n# path_to_parent = path_to_point[one_node]\n# for nxt_node in problem.getSuccessors(one_node):\n# pos = nxt_node[0]\n# mv = nxt_node[1]\n# # check if point already present in path to point\n# prev_cost = float(\"inf\")\n# if pos in cost_to_point:\n# prev_cost = cost_to_point[pos]\n# new_path = path_to_parent + [mv]\n# if prev_cost > problem.getCostOfActions(new_path):\n# path_to_point[pos] = new_path\n# cost_to_point[pos] = problem.getCostOfActions(new_path)\n# fringe_node.append(pos)\n#\n# # Check if destination is reached in the fringe node\n# for one_node in fringe_node:\n# if problem.isGoalState(one_node):\n# final_node = one_node\n# goal_found = True\n# break\n# \n# #print(len(fringe_node))\n# print(final_node)\n# print(path_to_point[final_node])\n# return path_to_point[final_node] \n\n util.raiseNotDefined()",
"def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n class Node:\n def __init__(self, state, parent, action, pathCost):\n self.state = state\n self.parent = parent\n self.action = action\n self.pathCost = pathCost\n\n def solution(self):\n path = list()\n tempNode = self\n while tempNode.state != problem.getStartState():\n path.insert(0, tempNode.action)\n tempNode = tempNode.parent\n return path\n\n def __eq__(self, other):\n if isinstance(other, Node):\n return self.state == other.state\n\n\n def childNode(successor, parent, action, stepCost):\n pathCost = parent.pathCost + stepCost\n child = Node(successor, parent, action, pathCost)\n return child\n\n initialNode = Node(problem.getStartState(), None, None, 0)\n frontier = util.PriorityQueue() #bfs uses a queue\n frontier.push(initialNode, initialNode.pathCost + heuristic(initialNode.state, problem)) #we use f(n) = pathCost + h(n) for the best solution\n explored = set()\n\n while not frontier.isEmpty() :\n nextNode = frontier.pop() #extract from the start of the queue\n if problem.isGoalState(nextNode.state):\n return nextNode.solution()\n explored.add(nextNode.state)\n for successor, action, stepCost in problem.getSuccessors(nextNode.state):\n child = childNode(successor, nextNode, action, stepCost)\n if child.state not in explored:\n frontier.update(child, child.pathCost + heuristic(child.state, problem))\n return []\n util.raiseNotDefined()",
"def foodHeuristic(state, problem):\n\n position, foodGrid = state\n\n # *** Your Code Here ***\n if len(foodGrid.asList()) == 0: # If no food, then no need to go on\n return 0\n trackHeuristic = []\n # Manhattan dist between curr node position and all foods\n # If there is food, iterate through all available foods\n for food in foodGrid.asList():\n currentHeuristic = distance.manhattan(position, food)\n trackHeuristic.append(currentHeuristic)\n return max(trackHeuristic)",
"def cornersHeuristic(state, problem):\n corners = problem.corners # These are the corner coordinates\n walls = problem.walls # These are the walls of the maze, as a Grid (game.py)\n \"*** YOUR CODE HERE ***\"\n \"\"\"\n En este ejercicio me he dado cuenta de un problema de mi definición del espacio de estados:\n - El espacio de estados consiste en tuplas ((x,y), grid), donde (x,y) es la posición en coordenadas\n y grid es la tabla de true/false.\n - El problema es que yo he pensado la tabla grid en forma de matriz matemática, de manera que los índices\n no van de acuerdo con la posición de las esquinas, sinó con los índices de una matriz.\n Para solucionar este problema sin tener que modificar todo lo anterior (dado que no me queda tiempo) lo que he\n tenido que hacer es crear una lista y añadir de forma ordenada los valores true/false, para que se corresponda\n cada uno con su esquina.\n \n Mi heurística consiste en lo siguiente:\n * Calculo la distancia desde la posición en la que me sitúo hasta todos los corners no visitados (los que aún\n tienen comida) y me quedo con la mínima de estas distancias, y con el corner que me de esa mínima.\n * Calculo la distancia desde ese corner (el mínimo de antes) hasta todos los otros posibles corners no visitados\n y de nuevo me quedo con la mínima distancia y con el corner que me da esa mínima.\n * Repito este proceso hasta que no queden corners.\n Entonces lo que hago es definir una nueva lista de corners, newListOfCorners que irá extrayendo los corners a medida\n que su distanca sea calculada. Por ejemplo, si tengo los cuatro corners con comida y estoy en una posición \n aleatoria, la lista newListOfCorners estará llena. Se calculará la distancia a cada corner y el corner que de la \n mínima será extraído de newListOfCorners. Entonces se calculará la distancia desde este corner hasta los restantes\n tres corners de newListOfCorners y el corner de esos tres que me de la mínima será extraído de la lista. Etc...\n \"\"\"\n\n # Ordenamos la lista de True's y False's para que vaya acorde con el orden de la lista corners:\n visitedCorners = []\n visitedCorners.append(state[1][1][0])\n visitedCorners.append(state[1][0][0])\n visitedCorners.append(state[1][1][1])\n visitedCorners.append(state[1][0][1])\n corners = list(corners) # De aquí saco una lista que contenga los corners ordenados.\n # Ahora los corners y la lista de visitedCorners contendrán la información de forma ordenada y coherente\n minimum = 9999999999999999 # Defino un mínimo muy grande para asegurarme que nunca sea superado\n total = 0 # Inicializo el total a cero\n newListOfCorners = [] # Creo una nueva lista para añadir los corners no estudiados\n for corner in corners: # Primero vamos a llenar la lista de corners con los que me interesen: los que tienen comida\n if visitedCorners[corners.index(corner)]: # Miramos que el corner tenga comida, sino pasamos\n newListOfCorners.append(corner) # Si tiene comida, lo añadimos\n minimCorner = corners[0] # Inicializo el minimCorner a un corner aleatorio para que no me de problemas más tarde\n actualState = state[0] # Lo mismo\n\n while not len(newListOfCorners) == 0: # Mientras la lista no esté vacía...\n for corner in newListOfCorners: # Cogemos un corner de la lista\n distanceToCorner = manhattanHeuristicToCorners(actualState, corner) # Calculamos dist. 
a corner\n if distanceToCorner < minimum: # Calculamos el mínimo\n minimum = distanceToCorner\n minimCorner = corner\n total += minimum # Y lo añadimos al total\n actualState = minimCorner # Reactualizamos cada variable para volver a empezar el bucle\n minimum = 9999999999999999999999999999999\n newListOfCorners.remove(minimCorner)\n return total",
"def astar_multi(maze):\n # TODO: Write your code here\n gFunction = {}\n frontier = PriorityQueue()\n path = []\n ret = []\n MSTLengths = {}\n edges = {}\n\n objectives = maze.getObjectives()\n start = State(maze.getStart()[0], maze.getStart()[1], objectives)\n gFunction[start] = 0\n frontier.put(start) \n getEdgeWeights(maze, objectives, edges) # init edge weights for MST\n\n while not frontier.empty():\n\n currentState = frontier.get()\n currentCell = currentState.cell()\n objectivesLeft = currentState.objectives()\n\n if objectivesLeft.count(currentCell) != 0:\n objectivesLeft.remove(currentCell)\n\n # all objectives found, initialise backtrace and exit loop\n if len(objectivesLeft) == 0:\n path.clear()\n ret.clear()\n path.append(currentState)\n ret.append(currentCell)\n break\n \n # if we have already calculated MST length we can reuse value\n # else calculate MST length for this state and store it.\n length = 0\n if str(objectivesLeft) in MSTLengths:\n length = MSTLengths[str(objectivesLeft)]\n else:\n length = getMSTLength(objectivesLeft.copy(), maze, edges)\n MSTLengths[str(objectivesLeft)] = length\n\n neighbors = maze.getNeighbors(currentCell[0], currentCell[1])\n\n for i in neighbors:\n\n neighbor = State(i[0], i[1], objectivesLeft)\n gVal= gFunction[currentState] + 1\n\n if neighbor not in gFunction or gVal < gFunction[neighbor]:\n\n neighbor.setParent(currentState)\n gFunction[neighbor] = gVal\n\n hFunction = []\n for j in objectivesLeft:\n hFunction.append(abs(j[0] - i[0]) + abs(j[1] - i[1]) + length) # use MST length + manhatten distance to nearest objective as heuristic.\n\n hVal = min(hFunction)\n\n neighbor.setfFunction(gFunction[neighbor] + hVal)\n frontier.put(neighbor)\n\n # backtrace\n while path[0]!= start:\n \n currentCell = path[0]\n path.insert(0, currentCell.parent())\n ret.insert(0, currentCell.parent().cell())\n\n return ret",
"def uniform_cost_search(problem):\r\n \"*** YOUR CODE HERE ***\"\r\n fringe = util.PriorityQueue()\r\n path = set()\r\n final = []\r\n acts = dict()\r\n state = problem.get_start_state()\r\n fringe.push(state, 0)\r\n\r\n while (True):\r\n state = fringe.pop()\r\n path.add(state)\r\n states = problem.get_successors(state)\r\n acts[state] = states[:]\r\n if problem.is_goal_state(state):\r\n break\r\n\r\n #states = problem.get_successors(state)\r\n # push into fringe\r\n for stat in states:\r\n if stat[0] not in path:\r\n fringe.push(stat[0], stat[1].piece.get_num_tiles()) #problem.get_cost_of_actions([stat[1]])\r\n\r\n while (True):\r\n if state == problem.get_start_state():\r\n break\r\n for key, val in acts.items():\r\n for va in val:\r\n if va[0] == state:\r\n final.append(va[1])\r\n state = key\r\n break\r\n else:\r\n continue\r\n break\r\n\r\n final.reverse()\r\n\r\n return final",
"def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n \"\"\"\n same as UCS function, but total cost is sum of cost till now , cost to the child node and \n cost to the goal state (heuristic function)\n \"\"\"\n fringes = util.PriorityQueue()\n explored =set()\n fringes.push((problem.getStartState(),[]),0)\n\n while(not fringes.isEmpty()):\n currentNode,currDir = fringes.pop()\n if problem.isGoalState(currentNode):\n finalPath = currDir\n break\n # print \"HOraaay goal has been found === > \", currentNode\n\n if not (currentNode in explored):\n explored.add(currentNode)\n for childNode in problem.getSuccessors(currentNode):\n totalCost = (childNode[2] + heuristic(childNode[0],problem)+problem.getCostOfActions(currDir))\n fringes.push((childNode[0],currDir+[childNode[1]]),totalCost)\n\n\n return finalPath\n\n\n\n\n\n\n\n util.raiseNotDefined()",
"def getMove(self, grid):\n# global prune\n# prune = 0\n def Terminal(stateTup):\n \"\"\"\n Checks if the node is a terminal node\n Returns eval(state) if it is terminal\n \"\"\"\n state = stateTup[0]\n maxDepth = self.depthLimit\n if stateTup[1] == maxDepth:\n val = self.h.get(str(state.map))\n if val == None:\n Val = Eval(state)\n self.h[str(state.map)] = Val\n return Val\n else:\n return val\n elif len(stateTup[0].getAvailableMoves()) == 0:\n val = self.h.get(str(state.map))\n if val == None:\n Val = Eval(state)\n self.h[str(state.map)] = Val\n return Val\n else:\n return val\n\n def Eval(state):\n \"\"\"\n This is the eval function which combines many heuristics and assigns\n weights to each of them\n Returns a single value\n \"\"\"\n\n# H1 = htest2(state)\n# return H1\n H2 = h1(state)*monotonic(state)\n return H2\n\n\n def h1(state):\n Max = state.getMaxTile()\n left = len(state.getAvailableCells())/16\n if state.getCellValue([0,0]) == Max:\n v = 1\n else:\n v= 0.3\n Max = Max/1024\n return Max*left*v\n\n def mono(state):\n mon = 0\n# for i in range(4):\n# row = 0\n# for j in range(3):\n# if state.map[i][j] > state.map[i][j+1]:\n# row+=1\n# if row == 4:\n# mon += 1\n# for i in range(4):\n# column = 0\n# for j in range(3):\n# if state.map[j][i] > state.map[j+1][i]:\n# column +=1\n# if column == 4:\n# mon +=1\n#\n#\n# return mon/8\n for i in range(4):\n if all(earlier >= later for earlier, later in zip(grid.map[i], grid.map[i][1:])):\n mon+=1\n\n return mon/8\n\n def monotonic(state):\n cellvals = {}\n Path1 = [(3,0),(3,1),(3,2),(3,3),(2,3),(2,2),(2,1),(2,0),\n (1,0),(1,1),(1,2),(1,3),(0,3),(0,2),(0,1),(0,0)]\n for i in Path1:\n cellvals[i] = state.getCellValue(i)\n mon = 0\n for i in range(4):\n if cellvals.get((i,0)) >= cellvals.get((i,1)):\n if cellvals.get((i,1)) >= cellvals.get((i,2)):\n if cellvals.get((i,2)) >= cellvals.get((i,3)):\n mon +=1\n for j in range(4):\n if cellvals.get((0,j)) >= cellvals.get((1,j)):\n if cellvals.get((1,j)) >= cellvals.get((2,j)):\n if cellvals.get((2,j)) >= cellvals.get((3,j)):\n mon+=1\n return mon/8\n\n\n\n def htest2(state):\n score1 = 0\n score2 = 0\n r = 0.5\n\n Path1 = [(3,0),(3,1),(3,2),(3,3),(2,3),(2,2),(2,1),(2,0),\n (1,0),(1,1),(1,2),(1,3),(0,3),(0,2),(0,1),(0,0)]\n Path2 = [(3,0),(2,0),(1,0),(0,0),(0,1),(1,1),(2,1),(3,1),\n (3,2),(2,2),(1,2),(0,2),(0,3),(1,3),(2,3),(3,3)]\n valDict = {}\n for n in range(16):\n valDict[Path1[n]] = state.getCellValue(Path1[n])\n for n in range(16):\n if n%3 == 0:\n self.emergency()\n cell1 = valDict.get(Path1[n])\n cell2 = valDict.get(Path2[n])\n score1 += (cell1) * (r**n)\n score2 += (cell2) * (r**n)\n return max(score1,score2)\n\n\n def Maximize(stateTup,A,B):\n \"\"\"\n Returns a tuple of state,eval(state)\n Takes in a stateTup(tuple of grid + depth of the grid), alpha,\n and beta\n \"\"\"\n self.emergency()\n t = Terminal(stateTup)\n if t != None:\n return (None, t)\n\n maxChild , maxUtility = None,-999999999\n state = stateTup[0]\n Map = self.dict.get(str(state.map))\n if Map == None:\n children = []\n for M in range(4):\n g = state.clone()\n if g.move(M):\n children.append(g)\n self.dict[str(state.map)] = children\n else:\n children = Map\n for child in children:\n childTup = (child,stateTup[1]+1)\n utility = Minimize(childTup,A,B)[1]\n if utility > maxUtility:\n maxChild , maxUtility = child , utility\n if maxUtility >= B:\n# global prune\n# prune +=1\n break\n if maxUtility > A:\n A = maxUtility\n\n return (maxChild,maxUtility)\n\n\n def Minimize(stateTup,A,B):\n \"\"\"\n Returns a tuple of 
state,eval(state)\n Takes in a stateTup(tuple of grid + depth of the grid), alpha,\n and beta\n \"\"\"\n self.emergency()\n t = Terminal(stateTup)\n if t != None:\n return (None, t)\n\n minChild , minUtility = None,999999999\n state = stateTup[0]\n Map= self.dict.get(str(state.map))\n if Map == None:\n cells= state.getAvailableCells()\n children = []\n tiles = [2,4]\n for i in cells:\n for j in tiles:\n g = state.clone()\n g.insertTile(i,j)\n children.append(g)\n self.dict[str(state.map)] = children\n else:\n children = Map\n for child in children:\n childTup = (child,stateTup[1]+1)\n utility = Maximize(childTup,A,B)[1]\n if utility < minUtility:\n minChild , minUtility = child , utility\n if minUtility <= A:\n# global prune\n# prune +=1\n break\n if minUtility < B:\n B = minUtility\n\n return (minChild,minUtility)\n\n\n\n def decision(grid):\n \"\"\"\n Decision function which returns the move which led to the state\n \"\"\"\n child = Maximize((grid,0),-999999999,999999999)[0]\n Child = child.map\n g = grid.clone()\n for M in range(4):\n if g.move(M):\n if g.map == Child:\n # global prune\n # global pruneLog\n # pruneLog.append(prune)\n # print(prune)\n # print(sum(pruneLog)/len(pruneLog))\n return M\n g = grid.clone()\n\n self.dict = {}\n self.h = {}\n self.prevTime = time.clock()\n self.depthLimit = 1\n self.mL = []\n self.over = False\n while self.over == False:\n self.depthLimit +=1\n try :\n self.mL.append(decision(grid))\n\n except KeyError:\n# print(self.depthLimit)\n return self.mL[-1]\n except IndexError:\n return random.randint(0,3)\n self.Alarm(time.clock())\n return self.mL[-1]",
"def astar(grid, heuristic):\r\n\r\n class MapNode:\r\n def __init__(self, cell, cost, parent):\r\n self.cell = cell\r\n self.cost = cost\r\n self.parent = parent\r\n\r\n @functools.total_ordering\r\n class FrontierElement:\r\n def __init__(self, cell, cost, parent, estimatedCost):\r\n self.node = MapNode(cell, cost, parent)\r\n self.estimatedCost = estimatedCost\r\n def __lt__(self, other):\r\n return self.estimatedCost < other.estimatedCost\r\n def __eq__(self, other):\r\n return self.estimatedCost is other.estimatedCost\r\n\r\n frontier = PriorityQueue()\r\n visitedNodes = set()\r\n frontier.put(FrontierElement(grid.getStart(), 0, None, 0))\r\n\r\n path = []\r\n\r\n while not frontier.empty():\r\n currentElement = frontier.get()\r\n grid.addVisited(currentElement.node.cell)\r\n visitedNodes.add(currentElement.node.cell)\r\n\r\n if currentElement.node.cell in grid.getGoals():\r\n currentNode = currentElement.node\r\n while currentNode is not None:\r\n path.insert(0, currentNode.cell)\r\n currentNode = currentNode.parent\r\n break\r\n\r\n for neighbor in grid.getNeighbors(currentElement.node.cell):\r\n neighborCoord = neighbor[0]\r\n\r\n if neighborCoord in visitedNodes:\r\n continue\r\n\r\n neighborCost = neighbor[1]\r\n cheapestGoal = min(grid.getGoals(), key=lambda goal: Vector2.fromCell(neighborCoord).squaredDistanceTo(Vector2.fromCell(goal)))\r\n\r\n cost = currentElement.node.cost + neighborCost\r\n\r\n frontier.put(FrontierElement(neighborCoord, cost, currentElement.node, cost + heuristic(neighborCoord, cheapestGoal)))\r\n\r\n grid.setPath(path)",
"def cornersHeuristic(state, problem):\n corners = problem.corners # These are the corner coordinates\n walls = problem.walls # These are the walls of the maze, as a Grid (game.py)\n distance = []\n for i in range(len(corners)):\n distance.append(fabs((corners[i][0] - state[0][0]) + (corners[i][1] - state[0][1])))\n \"*** YOUR CODE HERE ***\"\n return min(distance) # Default to trivial solution",
"def A_Star(start, goal, final_occupancy_grid):\n x, y = np.mgrid[0:45:1, 0:42:1]\n pos = np.empty(x.shape + (2,))\n pos[:, :, 0] = x;\n pos[:, :, 1] = y\n pos = np.reshape(pos, (x.shape[0] * x.shape[1], 2))\n coords = list([(int(x[0]), int(x[1])) for x in pos])\n\n # Define the heuristic:\n # h: dictionary containing the distance to goal ignoring obstacles for all coordinates in the grid (heuristic function)\n h = np.linalg.norm(pos - goal, axis=1)\n h = dict(zip(coords, h))\n\n # Check if the start and goal are within the boundaries of the map\n for point in [start, goal]:\n\n if point[0] < 0 and point[0] >= final_occupancy_grid.shape[0]:\n raise Exception('Start node/goal node is not contained in the map')\n\n if point[1] < 0 and point[1] >= final_occupancy_grid.shape[1]:\n raise Exception('Start node/goal node is not contained in the map')\n\n # check if start and goal nodes correspond to free spaces\n if final_occupancy_grid[start[0], start[1]]:\n raise Exception('Start node is not traversable')\n\n if final_occupancy_grid[goal[0], goal[1]]:\n raise Exception('Goal node is not traversable')\n\n # get the possible movements\n movements = _get_movements_8n()\n\n # The set of visited nodes that need to be (re-)expanded, i.e. for which the neighbors need to be explored\n # Initially, only the start node is known.\n openSet = [start]\n\n # The set of visited nodes that no longer need to be expanded.\n closedSet = []\n\n # For node n, cameFrom[n] is the node immediately preceding it on the cheapest path from start to n currently known.\n cameFrom = dict()\n\n # For node n, gScore[n] is the cost of the cheapest path from start to n currently known.\n gScore = dict(zip(coords, [np.inf for x in range(len(coords))]))\n gScore[start] = 0\n\n # For node n, fScore[n] := gScore[n] + h(n). map with default value of Infinity\n fScore = dict(zip(coords, [np.inf for x in range(len(coords))]))\n fScore[start] = h[start]\n\n # while there are still elements to investigate\n while openSet != []:\n\n # the node in openSet having the lowest fScore[] value\n fScore_openSet = {key: val for (key, val) in fScore.items() if key in openSet}\n current = min(fScore_openSet, key=fScore_openSet.get)\n del fScore_openSet\n\n # If the goal is reached, reconstruct and return the obtained path\n if current == goal:\n return reconstruct_path(cameFrom, current)\n\n openSet.remove(current)\n closedSet.append(current)\n\n # for each neighbor of current:\n for dx, dy, deltacost in movements:\n\n neighbor = (current[0] + dx, current[1] + dy)\n\n # if the node is not in the map, skip\n if (neighbor[0] >= final_occupancy_grid.shape[0]) or (neighbor[1] >= final_occupancy_grid.shape[1]) or (\n neighbor[0] < 0) or (neighbor[1] < 0):\n continue\n\n # if the node is occupied, skip\n if (final_occupancy_grid[neighbor[0], neighbor[1]]):\n continue\n\n # if the has already been visited, skip\n if (neighbor in closedSet):\n continue\n # d(current,neighbor) is the weight of the edge from current to neighbor\n # tentative_gScore is the distance from start to the neighbor through current\n tentative_gScore = gScore[current] + deltacost\n\n if neighbor not in openSet:\n openSet.append(neighbor)\n\n if tentative_gScore < gScore[neighbor]:\n # This path to neighbor is better than any previous one. Record it!\n cameFrom[neighbor] = current\n gScore[neighbor] = tentative_gScore\n fScore[neighbor] = gScore[neighbor] + h[neighbor]\n\n # Open set is empty but goal was never reached\n print(\"No path found to goal\")\n return []",
"def manhattan_heuristic(state):\n man_h = 0\n size = len(state)\n for i in range (size):\n for j in range (size):\n if state[i][j] == 0:\n continue\n else:\n man_h = man_h + abs(i - int(state[i][j]/3)) + abs(j - (state[i][j])%3)\n return man_h",
"def dijkstras(occupancy_map,x_spacing,y_spacing,start,goal):\n ROWS, COLS = occupancy_map.shape\n #convert physical location to index in the grid\n startNode = locToIndex(start, x_spacing, y_spacing)\n startingNodeLoc = indexToLoc(startNode, x_spacing, y_spacing)\n initialcost = math.sqrt((startingNodeLoc[0] - start[0])**2 + (startingNodeLoc[1] - start[1])**2)\n goalNode = locToIndex(goal, x_spacing, y_spacing)\n \n freelist = np.where(occupancy_map == 0)\n if occupancy_map[startNode[0], startNode[1]] != 0:\n #raise ValueError(\"start : ({}, {}) invalid, is an obstacle\".format(startNode[0], startNode[1]))\n startNode = findValidNode(startNode, start, occupancy_map, x_spacing, y_spacing)\n if occupancy_map[goalNode[0], goalNode[1]] != 0:\n #raise ValueError(\"goal: ({}, {}) invalid, is an obstacle\".format(goalNode[0], goalNode[1]))\n goalNode = findValidNode(goalNode, goal, occupancy_map, x_spacing, y_spacing)\n candidate = [ [sys.float_info.max, \n i, (freelist[0][i], freelist[1][i])] for i in range(len(freelist[0]))] \n visited = set([])\n queue = PriorityQueue(candidate)\n paths = {}\n found = False\n\n #update initial cost\n queue.remove(startNode)\n queue.insert(startNode, initialcost)\n paths[startNode] = None\n updateInitial(occupancy_map, ROWS, COLS, start, startNode, 0, 1, queue, paths, x_spacing, y_spacing, initialcost)\n updateInitial(occupancy_map, ROWS, COLS, start, startNode, 0, -1, queue, paths, x_spacing, y_spacing, initialcost)\n updateInitial(occupancy_map, ROWS, COLS, start, startNode, 1, 0, queue, paths, x_spacing, y_spacing, initialcost)\n updateInitial(occupancy_map, ROWS, COLS, start, startNode, -1, 0, queue, paths, x_spacing, y_spacing, initialcost)\n while queue.size() > 0:\n priority, current = queue.pop()\n if current == goalNode:\n found = True\n break\n #not reaching goal node yet, for each of its neighbor, update the weight\n visited.add(current)\n update(occupancy_map, ROWS, COLS, current, 0, 1, priority, queue, paths, visited, x_spacing, y_spacing)\n update(occupancy_map, ROWS, COLS, current, 0, -1, priority, queue, paths, visited, x_spacing, y_spacing)\n update(occupancy_map, ROWS, COLS, current, 1, 0, priority, queue, paths, visited, x_spacing, y_spacing)\n update(occupancy_map, ROWS, COLS, current, -1, 0, priority, queue, paths, visited, x_spacing, y_spacing)\n \n if not found:\n raise ValueError(\"fail to find shortest path\")\n node = goalNode\n shortestpath = []\n while node is not None:\n shortestpath.append(node)\n node = paths[node]\n #shortestpath.append(startNode)\n #print (startNode)\n #print ('*', list(reversed(shortestpath)))\n #print (goalNode)\n p = list(reversed([ indexToLoc(n, x_spacing, y_spacing) for n in shortestpath]))\n #start and final position may not fall on center of the grid\n if abs(p[0][0] - start[0]) > 0.0005 or abs(p[0][1] - start[1]) > 0.0005:\n p.insert(0, [start[0][0], start[1][0]])\n if abs(p[-1][0] - goal[0]) > 0.0005 or abs(p[-1][1] - goal[1]) > 0.0005:\n p.append([goal[0][0], goal[1][0]])\n res = np.array(p)\n print (res)\n return res",
"def fast(maze):\n # TODO: Write your code here\n pq = []\n visited = {}\n\n goals = maze.getObjectives()\n goals_pq = new_pq(maze, goals, maze.getStart())\n\n f, curr_goal = heapq.heappop(goals_pq)\n heapq.heappush(pq, (f, [maze.getStart()]))\n\n while len(pq) > 0:\n curr_path = heapq.heappop(pq)[1]\n curr = curr_path[-1]\n\n if curr in visited:\n continue\n heuristic = closest(maze, curr, curr_goal)\n\n f = heuristic + len(curr_path) - 1\n visited[curr] = f\n if curr in goals:\n goals.remove(curr)\n if len(goals) == 0:\n return curr_path\n else:\n # print(\"before\")\n # print(curr_goal)\n goals_pq = new_pq(maze, goals, curr)\n f, curr_goal = heapq.heappop(goals_pq)\n # print(\"after\")\n # print(curr_goal)\n pq = []\n heapq.heappush(pq, (f, curr_path))\n visited.clear()\n continue\n for item in maze.getNeighbors(curr[0], curr[1]):\n heuristic = closest(maze, item, curr_goal)\n new_f = heuristic + len(curr_path) - 1\n if item not in visited:\n heapq.heappush(pq, (new_f, curr_path + [item]))\n else: # checks if overlap has smaller f\n if new_f < visited[item]:\n visited[item] = new_f\n heapq.heappush(pq, (new_f, curr_path + [item]))\n return []",
"def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n\n start = problem.getStartState()\n frontier = util.PriorityQueue() # in heap stored as ( cost,priority,location)\n frontier.push(start, 0)\n explored = []\n\n location = 0 # to remember which successor part im accessing\n action = 1\n heap_location = 2\n cost = 2\n\n history = []\n total_cost = 0 # need something to process total path cost\n\n while not frontier.isEmpty():\n\n current_position = frontier.pop()\n if problem.isGoalState(current_position):\n break\n if current_position not in explored:\n explored.append(current_position)\n else:\n continue\n\n for path in problem.getSuccessors(current_position):\n # if path[location] not in explored: # hasn't been expanded from\n if path[location] not in [item[heap_location] for item in frontier.heap]: # if not in frontier\n # print(\"valid successor (no frontier)\", each_successor[location])\n\n for entry in history:\n if entry['To'] == current_position:\n total_cost = entry['Cost']\n heuristic_cost = total_cost + heuristic(path[location], problem)\n frontier.push(path[location], path[cost] + total_cost + heuristic_cost)\n history.append({'From': current_position, 'To': path[location], 'By': path[action],\n 'Cost': total_cost + path[cost]})\n else:\n # print(\"in frontier\")\n for entry in history:\n if entry['To'] == current_position:\n total_cost = entry['Cost']\n frontier.update(path[location], total_cost + path[cost])\n # should prob add something that goes through history and wipes old entry for that point\n for entry in history:\n if entry['To'] == path[location] and entry['Cost'] > total_cost + path[cost]:\n history.remove(entry)\n history.append({'From': current_position, 'To': path[location], 'By': path[action],\n 'Cost': total_cost + path[cost]})\n break\n while not problem.isGoalState(history[-1]['To']): # loop removes last couple of movements which don't lead to goal\n history.remove(history[-1])\n\n x = len(history)\n while x - 1 != 0: # loop clears out actions that dont come from previous position\n if history[x - 1]['From'] != history[x - 2]['To']: # starts from goal and works backwards\n history.remove(history[x - 2])\n x = len(history)\n else:\n x -= 1\n\n return [path['By'] for path in history]"
] |
[
"0.7437132",
"0.7243507",
"0.7075924",
"0.7064572",
"0.6833929",
"0.6753228",
"0.6737725",
"0.64962655",
"0.6481526",
"0.6475066",
"0.6400677",
"0.63954955",
"0.63701344",
"0.6343227",
"0.63267416",
"0.63048136",
"0.6302781",
"0.6302682",
"0.6297269",
"0.6265315",
"0.62526554",
"0.62487996",
"0.6243192",
"0.6236099",
"0.62299854",
"0.6227819",
"0.6213307",
"0.62056553",
"0.62024444",
"0.6195679"
] |
0.8600155
|
0
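The search-heuristic snippets in the negatives list above all combine the same A* ingredients: an accumulated path cost g, an admissible heuristic h, and a priority f = g + h. A minimal illustrative sketch, assuming a 4-connected grid and a Manhattan-distance heuristic (both are assumptions of this sketch, not taken from any snippet above):

import heapq

def manhattan(a, b):
    # Admissible for 4-connected grids: it never overestimates the remaining cost.
    return abs(a[0] - b[0]) + abs(a[1] - b[1])

def a_star(start, goal, passable):
    # passable(cell) -> bool; returns a start-to-goal list of cells, or [] if unreachable.
    frontier = [(manhattan(start, goal), 0, start, [start])]
    seen = set()
    while frontier:
        f, g, cell, path = heapq.heappop(frontier)
        if cell == goal:
            return path
        if cell in seen:
            continue
        seen.add(cell)
        for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            nxt = (cell[0] + dx, cell[1] + dy)
            if passable(nxt) and nxt not in seen:
                heapq.heappush(frontier, (g + 1 + manhattan(nxt, goal), g + 1, nxt, path + [nxt]))
    return []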
|
Generate the header string for this description. If the description is empty, return an empty string. Otherwise, the raw data is joined together and returned, with no empty ('') components.
|
def to_header(self):
if not self.filled:
return ''
return "\n".join(self.data)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _header_string( self, title='title' ): \n return_str = ''\n return_str += '{}\\n\\n'.format( title )\n return_str += '{} atoms\\n'.format( len(self.atoms) )\n if len(self.bonds) != 0:\n return_str += '{} bonds\\n\\n'.format( len(self.bonds) )\n return_str += '{} atom types\\n'.format( len(self.atom_types ) )\n if len(self.bond_types) != 0:\n return_str += '{} bond types\\n\\n'.format( len(self.bond_types ) )\n return_str += '\\n'\n return return_str",
"def generate_header(self, header=None):\n if header is None:\n header = self.header\n\n lines = [self.PREFIX_HEAD + '!b']\n for k, v in header.items():\n if k in ('labels', 'categories'):\n v = ', '.join(v)\n elif k == 'draft':\n v = repr(v)\n lines.append(self.HEADER_FMT % (k, v))\n lines.append(self.PREFIX_END)\n return '\\n'.join([_f for _f in lines if _f]) + '\\n'",
"def headerstring(self):\n sss = 'IVO LEGEND:\\n'\n sss += ' Created from 152 or 155\\n'\n sss += ' Pct number\\n'\n sss += ' Found in 152 (Y/N)\\n'\n sss += ' Found in 155 (Y/N)\\n'\n sss += ' Ivo serial number\\n'\n sss += ' PEB used for opening\\n'\n sss += ' Opening date/time\\n'\n sss += ' Date/time of first vote\\n'\n sss += ' PEB used for closing\\n'\n sss += ' Closing date/time\\n'\n sss += ' Date/time of last vote\\n'\n sss += ' Number of vote events 152\\n'\n sss += ' Number of vote events 155\\n'\n sss += ' Number of vote events 155 by precinct\\n'\n sss += ' Number of late vote events 152\\n'\n sss += ' Pct numbers\\n'\n sss += ' Ballot styles\\n'\n sss += ' Memory collection times\\n'\n return sss",
"def construct_header(self): \n \n # create the individual labels\n hdr_bits = [hb.format(hdr) for hb, hdr in zip(self.row_base, self.headers)]\n \n # stick it all together and return with hdr_sep underneath\n hdr_str = f\"|{'|'.join(hdr_bits)}|\\n\"\n return hdr_str + self.hdr_sep * (len(hdr_str)-1) + \"\\n\"",
"def header_text(self):\n return os.linesep.join(map(str, self.headers))",
"def header(self):\n return encode_as_str([self.unsealed_header(), self.seal_data], sep='`')",
"def header(self):\n return encode_as_str([self.unsealed_header(), self.seal_data], sep='`')",
"def header(self) -> str:\n value = self.kind\n if self.options:\n value += '; ' + '; '.join(f'{k}={v}' for k, v in self.options.items())\n return value",
"def table_header(self):\n title = 'HYPERPARAMETER FINE-TUNING RESULTS'\n title_len = len(title)\n extra_spaces = self.max_length - title_len\n left_spaces = extra_spaces // 2\n right_spaces = extra_spaces - left_spaces - 1\n\n return '| ' + (left_spaces * ' ') + title + (right_spaces * ' ') + ' |\\n'",
"def header( self ):\n\t\treturn '; '.join( [ '='.join(i) for i in self.items() ] )",
"def BuildHeaderString (text):\r\n\r\n return t.BuildHeaderString (text)",
"def buildheader(self):\n \n lines = {}\n for k in self._d:\n lines[self._d[k]]='# %d %s'%(self._d[k],k.upper())\n #sort the new keys\n nkeys= lines.keys()\n nkeys.sort()\n #join them together with newlines\n ans = ''\n for k in nkeys:\n ans=ans+\"%s\\n\"%lines[k]\n return ans",
"def description(self):\n return self._hdr",
"def description(self):\n return self._hdr",
"def first_header():\n return \"\"\"\n<th>Target\n<th>Date\n<th colspan=\"2\">UT\n<th>Exp\n<th>Cycle\n<th>No. of\n<th>Filters\n<th>XxY\n<th>Speed\n<th>NX1xNY1\n<th>X1\n<th>Y1\n<th>NX2xNY2\n<th>X2\n<th>Y2\n<th>Grat.\n<th>Slit\n<th>Slit\n<th>ID\n<th>PI\n<th align=\"left\">Comment\n\"\"\"",
"def unsealed_header(self):\n return encode_as_str([self.height, self.timestamp, self.target, self.parent_hash, self.is_genesis, self.merkle], sep='`')",
"def unsealed_header(self):\n return encode_as_str([self.height, self.timestamp, self.target, self.parent_hash, self.is_genesis, self.merkle], sep='`')",
"def __str__(self):\n header_string = ''\n for key, value in self.define.items():\n header_string += '#define {} {}\\n'.format(key, self.format(value))\n return header_string",
"def createHeaderRecord(self):\n\n # ascii-character limit for every header record information (in bytes)\n lenVersion = 8\n lenLocalPatientID = 80\n lenLocalRecordingID = 80\n lenStartDate = 8\n lenStartTime = 8\n lennBytesHeader = 8\n lenEDFPlus = 44\n lennDataRecord = 8\n lenDurationDataRecord = 8\n lennSignals = 4\n \n HeaderInfolist = [self.Version, self.LocalPatientID, self.LocalRecordingID, self.StartDate, self.StartTime, self.nBytesHeader, self.EDFPlus,\\\n self.nDataRecord, self.DurationDataRecord, self.nSignals]\n lenHeaderInfo = [lenVersion, lenLocalPatientID, lenLocalRecordingID, lenStartDate, lenStartTime, lennBytesHeader, lenEDFPlus, lennDataRecord,\\\n lenDurationDataRecord, lennSignals]\n\n for i in range(len(HeaderInfolist)):\n maxlen = lenHeaderInfo[i]\n if len(HeaderInfolist[i]) > maxlen:\n # truncates the string if length is greater than limit\n HeaderInfolist[i] = HeaderInfolist[i][:maxlen] \n \n else:\n HeaderInfolist[i] = HeaderInfolist[i].ljust(maxlen)\n \n # converts the list to a string with no separator in between elements\n self.HeaderRecord = ''.join(HeaderInfolist) \n\n # concatenates each BioSignal TechInfo to the Header Record string\n for i in range(len(self.BioSignals[0].TechInfo)):\n for x in range(len(self.BioSignals)):\n self.HeaderRecord = self.HeaderRecord + self.BioSignals[x].TechInfo[i]",
"def get_header():\n title = \"\"\"\n ___ __\n | o _|_ _|_ _ ._ (_ _ ._ _|_ o ._ _ _ ._ _|_ /\\ ._ _. | _ o _\n | \\/\\/ | |_ |_ (/_ | __) (/_ | | |_ | | | | (/_ | | |_ /--\\ | | (_| | \\/ _> | _>\n /\"\"\"\n\n sub_title = \"Get sentiments from your tweets fast and easy!\"\n header = bcolors.HEADER + title + bcolors.ENDC + \"\\n\" + bcolors.WARNING + \"\\t\\t\" + sub_title + bcolors.ENDC + \"\\n\"\n return header",
"def design_report_header(self):\n rstr = nl() + \" \" + nl() + t('table border-collapse= \"collapse\" border=\"1px solid black\" width=100%') + nl()\n rstr += t('tr') + nl()\n row = [0, '<object type= \"image/PNG\" data= \"cmpylogoSeatAngle.png\" height=60 ></object>',\n '<font face=\"Helvetica, Arial, Sans Serif\" size=\"3\">Created with</font>' \" \" \" \" \" \" \" \" \" \" '<object type= \"image/PNG\" data= \"Osdag_header.png\" height=60 '' \" \" \" \" \" \" \"></object>']\n rstr += html_space(1) + t('td colspan=\"2\" align= \"center\"') + space(row[0]) + row[1] + t('/td') + nl()\n rstr += html_space(1) + t('td colspan=\"2\" align= \"center\"') + row[2] + t('/td') + nl()\n rstr += t('/tr') + nl()\n\n rstr += t('tr') + nl()\n rstr += design_summary_row(0, \"Company Name\", \"detail\", text_two=self.company_name, is_row=False)\n rstr += design_summary_row(0, \"Project Title\", \"detail\", text_two=self.project_title, is_row=False)\n rstr += t('/tr') + nl()\n\n rstr += t('tr') + nl()\n rstr += design_summary_row(0, \"Group/Team Name\", \"detail\", text_two=self.group_team_name, is_row=False)\n rstr += design_summary_row(0, \"Subtitle\", \"detail\", text_two=self.sub_title, is_row=False)\n rstr += t('/tr') + nl()\n\n rstr += t('tr') + nl()\n rstr += design_summary_row(0, \"Designer\", \"detail\", text_two=self.designer, is_row=False)\n rstr += design_summary_row(0, \"Job Number\", \"detail\", text_two=self.job_number, is_row=False)\n rstr += t('/tr') + nl()\n\n rstr += t('tr') + nl()\n rstr += design_summary_row(0, \"Date\", \"detail\", text_two=time.strftime(\"%d /%m /%Y\"), is_row=False)\n rstr += design_summary_row(0, \"Client\", \"detail\", text_two=self.client, is_row=False)\n rstr += t('/tr')\n rstr += t('/table') + nl() + \" \" + nl()\n\n rstr += t('hr')\n rstr += t('/hr') + nl() + \" \" + nl()\n return rstr",
"def _write_header(self, head_msg=None):\n now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n header = \"\\n%s\\nDateTime: %s \\nMessage: %s \\n\" % (\"*\" * 100, now, head_msg)\n\n return header",
"def build_header_1(self, header_len=b'\\x00\\x00\\x00\\x00', data_len=b'\\x00\\x00\\x00\\x00'):\n self.header_1 = b''\n header_1_dict = {'preamble': b'\\x50\\x4f',\n 'packet_type': b'\\x01\\x00\\x00\\x50',\n 'header_len': header_len + b'\\x02\\x00\\x00\\x00\\x00\\x00\\x00\\x00',\n 'data_len': data_len,\n 'agent_guid': b'{%s}' % self.agent_guid,\n 'agent_guid_padding': b'\\x00' * 90 + b'\\x01\\x00\\x00\\x00',\n 'agent_hostname': b'%s' % self.agent_hostname,\n 'hostname_padding': b'\\x00' * (32 - len(self.agent_hostname)) + b'\\x00' * 48}\n\n for item in header_1_dict:\n self.header_1 += header_1_dict[item]\n return self.header_1",
"def header():\n record = cfg.get_current_site_record()\n header = \"{0} ({1})\".format(record['url'], record['id'])\n size = len(header) + 2 + 2\n return \"\"\"{sep}\n# {header} #\n{sep}\"\"\".format(sep='#'*size, header=header)",
"def header(self, format=None):\n return [\" ID \",\n \"East\",\n \"North\",\n \"TARGET ELEV\",\n \" LENGTH\",\n \" AZ\",\n \" DIP\",\n \"PLAN ELEV\"]",
"def header(self, as_list=False, separator='\\t'):\n if not self.attrs():\n return None\n if as_list:\n return self.attrs()\n else:\n return separator.join(self.attrs())",
"def get_config_header(_config_global, _debug_log, _dpid, _hardware):\n return ''",
"def _make_header(title: str, category: int, description: str, slug: str, image_file_name: Optional[str] = None) -> str:\n\n current_date = _get_current_time()\n category = _get_category(category)\n social_image = SOCIAL_IMAGE_TEMPLATE.format(image_file_name) if image_file_name else \"\"\n header = HEADER_TEMPLATE.format(title, current_date, slug, category, description, social_image)\n\n if social_image:\n figure_template = FIGURE_TEMPLATE.format(social_image)\n header += figure_template\n\n return header",
"def build_markdown_header(title, date, author, categories, tags, slug,\r\n attachments=None):\r\n header = 'Title: %s\\n' % title\r\n if date:\r\n header += 'Date: %s\\n' % date\r\n if author:\r\n header += 'Author: %s\\n' % author\r\n if categories:\r\n header += 'Category: %s\\n' % ', '.join(categories)\r\n if tags:\r\n header += 'Tags: %s\\n' % ', '.join(tags)\r\n if slug:\r\n header += 'Slug: %s\\n' % slug\r\n if attachments:\r\n header += 'Attachments: %s\\n' % ', '.join(attachments)\r\n header += '\\n'\r\n return header",
"def get_header(self, title):\n self.header = '<!DOCTYPE html>' \\\n '<html>' \\\n '<head>' \\\n '<title>Harm Brugge - ' + title + '</title>' \\\n '<link rel=\"icon\" href=\"../resources/img/dna.png\"/>' \\\n '<link href=\"../resources/css/bootstrap.min.css\" rel=\"stylesheet\">' \\\n '<link href=\"../resources/css/main.css\" rel=\"stylesheet\">' \\\n '<script type=\"text/javascript\" src=\"../resources/js/jquery.js\"></script>' \\\n '<script src=\"../resources/js/bootstrap.min.js\"></script>' \\\n '<script type=\"text/javascript\" src=\"../resources/js/bootbox.min.js\"></script>' \\\n '</head>' \\\n '<body>' \\\n '<div class=\"container shadow\">' \\\n '<div class=\"logo\">' \\\n '<h1></h1>' \\\n '</div>' \\\n '<br/>' \\\n '<div class=\"row content\">' \\\n '<div class=\"content-main\">' \\\n '<br/>' \\\n '<p class=\"lead content-title\">' + title + '</p>'\n return self.header"
] |
[
"0.73597187",
"0.72831184",
"0.71944606",
"0.7154044",
"0.6919942",
"0.6901166",
"0.6901166",
"0.675267",
"0.66368526",
"0.66322434",
"0.66226566",
"0.65845186",
"0.658279",
"0.658279",
"0.6578081",
"0.6538257",
"0.6538257",
"0.6524499",
"0.65011746",
"0.641547",
"0.63942367",
"0.6302408",
"0.628779",
"0.6277615",
"0.6227234",
"0.62013006",
"0.62003803",
"0.6200267",
"0.6196637",
"0.61892676"
] |
0.7559777
|
0
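For context, a small usage sketch of the to_header answer above, wrapped in a hypothetical holder class (the filled and data attribute names come from the snippet; the class itself is an assumption of this sketch):

class Description:
    def __init__(self, lines=None):
        self.data = list(lines or [])   # raw header lines
        self.filled = bool(self.data)   # an empty description yields an empty header

    def to_header(self):
        if not self.filled:
            return ''
        return "\n".join(self.data)

# Joins the raw lines into one newline-separated header string.
print(Description(["X-Generator: demo", "X-Version: 1"]).to_header())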
|
x_max = max(seq) x_min = min(seq) epilson = 1e-6 new_seq = [10000 * (epilson + x - x_min) / (epilson + x_max - x_min) for x in seq]
|
def normalization(seq):
new_seq = [6.3578286171 * x for x in seq]
return new_seq
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def geo_seq(val, ratio, length):\n return [val * pow(ratio, i) for i in range(length)]",
"def power_e(amount, start, stop, truncated, sequence):\n ratio = .5\n for x in range(start, amount):\n y = abs(round(ratio * math.exp(x)))\n if truncated and y >= stop:\n sequence.append(stop)\n elif y < start:\n sequence.append(start)\n else:\n sequence.append(y)\n return sequence",
"def power_em1(amount, start, stop, truncated, sequence):\n ratio = .25\n for x in range(start, amount):\n y = abs(round(ratio * math.expm1(x)))\n if truncated and y >= stop:\n sequence.append(stop)\n elif y < start:\n sequence.append(start)\n else:\n sequence.append(y)\n return sequence",
"def buildAbs(maxVal):\n return [5*i for i in range(floor(maxVal/5)+1)]",
"def log(amount, start, stop, truncated, sequence):\n ratio = 10 ** (len(str(start)) + 1)\n for x in range(start, amount):\n # y = abs(round(math.log(x, 1)))\n y = abs(round(math.log1p(x) * ratio * 5))\n if truncated and y >= stop:\n sequence.append(stop)\n elif y < start:\n sequence.append(start)\n else:\n sequence.append(y)\n return sequence",
"def squares(amount, start, stop, truncated, sequence):\n for x in range(start, amount):\n y = x * x\n if truncated and y >= stop:\n sequence.append(stop)\n else:\n sequence.append(y)\n return sequence",
"def generate_eps(T_low, T_high, n_e, factor = 10):\n \n E_f = E_fermi (n_e)\n eps_min = E_f - factor * T_high\n eps_max = E_f + factor * T_high\n eps_step = T_low / factor\n \n return np.arange (eps_min, eps_max+eps_step, eps_step)",
"def hyperbolic_sine(amount, start, stop, truncated, sequence):\n ratio = 1\n for x in range(start, amount):\n y = abs(round(ratio * math.sinh(x)))\n if truncated and y >= stop:\n sequence.append(stop)\n elif y < start:\n sequence.append(start)\n else:\n sequence.append(y)\n return sequence",
"def get_range(min, max, intervals, log):\n if not log:\n min = float(min)\n max = float(max)\n difference = max-min\n step_size = difference/intervals\n output = [min + i*step_size for i in range(intervals+1)]\n return output\n else:\n from math import log10 as log\n log_min = log(min)\n log_max = log(max)\n log_difference = log_max - log_min\n step_size = log_difference/intervals\n output = [pow(10, log_min + i*step_size) for i in range(intervals+1)]\n return output",
"def _gser(a, x, eps=3.e-7, itmax=700):\n if x == 0.0:\n return 0.0\n ap = a\n sum = 1. / a\n delta = sum\n n = 1\n while n <= itmax:\n ap = ap + 1.\n delta = delta * x / ap\n sum = sum + delta\n if (abs(delta) < abs(sum) * eps):\n return (sum * np.exp(-x + a * np.log(x)))\n n = n + 1\n raise RuntimeError(\"Maximum iterations exceeded in gser\")",
"def euler_scheme(f_function, initial_value, start_point=0, division=100, end_interval=1):\n result_points = [initial_value]\n h_len = 1 / division\n for step in range(division * (end_interval - start_point)):\n result_points.append(result_points[step] + h_len *\n f_function(start_point + step * h_len, result_points[step]))\n return np.array(result_points)",
"def _Ep(self):\n return np.logspace(np.log10(self.Epmin.to('GeV').value),np.log10(self.Epmax.to('GeV').value),\n self.nEpd * (np.log10(self.Epmax/self.Epmin)))",
"def inverse_hyperbolic_sine(amount, start, stop, truncated, sequence):\n ratio = (start + stop) / 5\n for x in range(start, amount):\n y = abs(round(ratio * math.asinh(x)))\n if truncated and y >= stop:\n sequence.append(stop)\n elif y < start:\n sequence.append(start)\n else:\n sequence.append(y)\n return sequence",
"def eV(E):\n if np.max(E) < 100:\n return E * 1000\n else:\n return E",
"def math_map_list(values, toMin=0, toMax=1):\n minValue = min(values)\n maxValue = max(values)\n delta = maxValue - minValue\n deltaTarget = toMax - toMin\n newValues = [toMin +(value-minValue)*deltaTarget/delta for value in values]\n return newValues",
"def power(amount, start, stop, truncated, sequence):\n ratio = len(str(start)) + 1\n for x in range(start, amount):\n y = abs(round(ratio ** x))\n if truncated and y >= stop:\n sequence.append(stop)\n elif y < start:\n sequence.append(start)\n else:\n sequence.append(y)\n return sequence",
"def seq_sqrt(xs):\n num_list = []\n for xs_split in xs:\n print(xs)\n xs_num = int(xs_split)\n print(xs_num)\n xs_squrt = math.sqrt(xs_num)\n print(xs_squrt)\n num_list.append(xs_squrt)\n return num_list",
"def fraction(amount, start, stop, truncated, sequence):\n ratio = stop\n for x in range(start, amount):\n y = abs(round(ratio / (abs(x) + 1)))\n if truncated and y >= stop:\n sequence.append(stop)\n elif y < start:\n sequence.append(start)\n else:\n sequence.append(y)\n return sequence",
"def xi(a):\n return xrange(len(a))",
"def yield_spectral_range(self) -> Tuple[float, float, float]:\n return [min(self.x), max(self.x), len(self.x)]",
"def softplus_list(x_):\n y_ = [np.log(1 + np.exp(-np.abs(x_[0]))) + np.maximum(x_[0], 0)]\n for i in range(1, len(x_)):\n if x_[i] is not []:\n y_ = y_ + [np.log(1 + np.exp(-np.abs(x_[i]))) + np.maximum(x_[i], 0)]\n return y_",
"def sines(amount, start, stop, truncated, sequence):\n\n for x in range(start, amount):\n y = abs(round(stop * math.sin(x)))\n if truncated and y >= stop:\n sequence.append(stop)\n elif y < start:\n sequence.append(start)\n else:\n sequence.append(y)\n return sequence",
"def genvals():\n vals = np.empty(200)\n vals[:50] = np.arange(50) / 50\n vals[50:100] = (50 - np.arange(50)) / 50\n vals[100:] = -vals[:100]\n return vals",
"def desp_inicial(x): #Definición del desplazamiento inicial de la cuerda\r\n return np.exp(-1000*(x - longitud/2)**2)",
"def EGWD_fg(f):\n A = 4.2e-47\n res = np.zeros((len(f)))\n for i,freq in enumerate(f): \n if freq >=3e-3:\n # strain \n res[i] = A * freq**(-7/3) * np.exp(-2*(freq/5e-2)**2) \n else:\n res[i] = np.NaN\n return np.array(res)",
"def energy_to_lambda(energy_ev=[]):\n energy_mev = energy_ev * 1000\n lambda_array = np.sqrt(81.787 / energy_mev)\n return lambda_array",
"def smoothed(sequence, step=1, start=0):\n next_index = start + 1\n last = len(sequence) \n new_sequence = []\n if not step:\n return sequence\n ratio_step = step + 1\n for item in sequence:\n new_sequence.append(item)\n if next_index < last:\n next_item = sequence[next_index]\n ratio = (item + next_item) / (step + 1)\n ratio = int(ratio)\n for x in range(step):\n value = (ratio * x) + item\n new_sequence.append(int(value))\n next_index = next_index + 1\n return new_sequence",
"def regular(step, start=0.):\n\n def output(low, high):\n newstart = math.ceil((low - start)/step) * step + start\n return numpy.arange(newstart, high, step, dtype=numpy.float)\n output.func_name = \"regular(%g, start=%g)\" % (step, start)\n return output",
"def normalization(x, x_min=-5.12, x_max=5.12):\n for i in range(len(x.vect)):\n x.vect[i] = x_min + x.vect[i]*(x_max-x_min)\n return x",
"def define_intervals(self):\n i = 5 # a step of increment\n interval_sum = self.min_step\n interval_list = [self.min_step]\n while interval_sum < self.max_step:\n interval_sum += i\n interval_list.append(interval_sum)\n # interval_list.append(self.max_step)\n # print(\"Intervals\", interval_list)\n return interval_list"
] |
[
"0.6177466",
"0.61661714",
"0.5975783",
"0.5779353",
"0.56950027",
"0.56275076",
"0.5592511",
"0.5567296",
"0.5561729",
"0.54725146",
"0.5414097",
"0.5412501",
"0.54114294",
"0.53760564",
"0.5344241",
"0.5343695",
"0.53412575",
"0.53345144",
"0.5287912",
"0.5285122",
"0.5279148",
"0.5275514",
"0.52423006",
"0.52398574",
"0.5227902",
"0.52255356",
"0.5225248",
"0.51998794",
"0.51989126",
"0.5198228"
] |
0.6463649
|
0
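Read literally, the query above describes epsilon-padded min-max scaling onto a 0-10000 range, while the retrieved answer only multiplies by a fixed constant. A short sketch of what the query itself computes, under the assumption that the stripped operators were '-' and '*':

def min_max_scale(seq, scale=10000, epsilon=1e-6):
    # Shift by the minimum, divide by the range, stretch to [0, scale];
    # epsilon keeps the division finite when every value is identical.
    x_min, x_max = min(seq), max(seq)
    return [scale * (epsilon + x - x_min) / (epsilon + x_max - x_min) for x in seq]

print(min_max_scale([2.0, 4.0, 6.0]))  # roughly [0.0, 5000.0, 10000.0]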
|
Write array to a file as text or binary (default).
|
def quick_save_array(data, file_name, delimiter=','):
data.tofile(file_name, sep=delimiter)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def save_txt(data, file_path):\n array = sanitise_array(data)\n\n # If the data is floating then format the values in scientific notation.\n if np.issubdtype(array.dtype, np.floating):\n array = array.astype(np.float32)\n formatter = lambda x: f'{x:.12E}'\n elif np.issubdtype(array.dtype, np.integer):\n array = array.astype(np.int32)\n formatter = lambda x: str(x)\n else:\n raise TypeError(f'Type of the data could not be serialised - {array.dtype}')\n\n lines = [' '.join(formatter(val) for val in row) + '\\n' for row in array]\n with open(file_path, 'w') as f:\n f.writelines(lines)",
"def write_file(self):\n print 'Writing '+self.name+' binary...'\n if self.vals is not None:\n if len(self.vals) == self.size:\n stream = self.pack_mem()\n with open(self.name+'.bin','wb') as f:\n f.write(stream)\n print 'File written: '+self.name+'.bin'\n else:\n print 'Error: input array for '+self.name+'is not the right '+\\\n 'size (should be '+str(self.size)+'). Skipping.'\n else:\n print 'No array provided, skipping.'",
"def print_to_file(arr, fid, sep=\"\", format=\"%s\"):\n\n f = array_create.array(arr, bohrium=False)\n return f.tofile(fid, sep=sep, format=format)",
"def ArraytoFile(_array):\n\tfile = open('sort1.txt', 'w')\n\tfor line in _array:\n\t\tfile.write(line+\"\\n\")\n\tfile.close()",
"def save_array(array, filename):\n np.save(filename, array)",
"def write_output(arr, filename):\n print('Started writing the output..')\n f = open(filename, 'w')\n for a in arr:\n f.write(str(a) + '\\n')\n f.close()\n print('Done!, Open the file to see the approved loans.')",
"def write_text_file(data, file_name):\n\timport types\n\toutf = open(file_name, \"w\")\n\tif (type(data[0]) == types.ListType):\n\t\t# It is a list of lists\n\t\tfor i in xrange(len(data[0])):\n\t\t\tfor j in xrange(len(data)):\n\t\t\t\tif type(data[j][i]) == type(0):\n\t\t\t\t\toutf.write(\" %12d\"%data[j][i])\n\t\t\t\telse:\n\t\t\t\t\toutf.write(\" %12.5g\"%data[j][i])\n\t\t\toutf.write(\"\\n\")\n\telse:\n\t\t# Single list\n\t\tfor j in xrange(len(data)):\n\t\t\tif type(data[j]) == type(0):\n\t\t\t\toutf.write(\" %12d\\n\"%data[j])\n\t\t\telse:\n\t\t\t\toutf.write(\" %12.5g\\n\"%data[j])\n\toutf.close()",
"def cast_numpy_to_txt(arr, output_file):\n shape = arr.shape\n arr = arr.reshape([shape[0] * shape[1], shape[2]])\n\n np.savetxt(fname=output_file, X=arr, delimiter=' ', fmt='%.18e', newline='\\n', )",
"def writeArray(fname,arr):\n fh = open(fname,'w')\n fh.write('%d\\n' % arr.shape[0])\n fh.write('%d\\n' % arr.shape[1])\n for x in range(arr.shape[0]):\n for y in range(arr.shape[1]):\n if arr.dtype == np.complex:\n fh.write('%.7e %.7e\\n' % (arr[x,y].real, arr[x,y].imag))\n else:\n fh.write('%.7e\\n' % (arr[x,y]))\n fh.close()",
"def write(fname, data):\n # Encode to string.\n encoder = NumpyJSONEncoder(check_circular=True, indent=' ')\n serial = encoder.encode(data)\n\n # Write to file.\n with open(fname, 'w') as fo:\n fo.write(serial)",
"def write_csv_file(array, filename):\n\tnp.savetxt(filename, array, delimiter=\",\")",
"def write_to_txt(data, filename, attr='w'):\n f = open(filename, attr, encoding='utf-8', errors='ignore')\n for item in data:\n f.write(item.__str__())\n f.close()",
"def to_txt(self, fpath):\n np.savetxt(fpath, self._arr.T)",
"def save(file, arr, allow_pickle=True, fix_imports=True):\n\n return numpy.save(file, array_create.array(arr, bohrium=False), allow_pickle, fix_imports)",
"def binary_out(array, fnam, dt=np.dtype(np.float64), endianness='big', appendDim=False):\r\n if appendDim == True :\r\n fnam_out = fnam + '_'\r\n for i in array.shape[:-1] :\r\n fnam_out += str(i) + 'x' \r\n fnam_out += str(array.shape[-1]) + '.raw'\r\n else :\r\n fnam_out = fnam\r\n arrayout = np.array(array, dtype=dt)\r\n if sys.byteorder != endianness:\r\n arrayout.byteswap(True)\r\n arrayout.tofile(os.path.abspath(fnam_out))",
"def write_txt(data, out_path, type=\"w\"):\n with open(out_path, type) as f:\n f.write(data.encode(\"utf-8\"))",
"def writeList2File(filename, array, overwrite=False, separator=';'):\n mode = 'a'\n if overwrite:\n mode = 'w'\n file = open(filename, mode)\n file.write(separator.join(map(str,array)) + '\\n')",
"def numpy_2_file(narray, file, path=OUTPUT_PATH, sep=',' ):\n file_path = path + file\n narrayc = numpy.copy(narray)\n numpy.place(narrayc,numpy.logical_or(narrayc==-1,narrayc==-2), 2)\n dataset = numpy.copy(narrayc).astype(str)\n numpy.place(dataset,dataset=='2', '*')\n d=numpy.atleast_2d(dataset)\n numpy.savetxt(file_path, d, delimiter=sep, fmt='%s')\n return",
"def _write_array_on_file(self, pa_array):\n pa_batch = pa.RecordBatch.from_struct_array(pa_array)\n self._num_bytes += pa_array.nbytes\n self.pa_writer.write_batch(pa_batch)",
"def output_file(newarray, filename):\n np.savetxt(filename + \"_formatted.txt\", newarray, delimiter=\" \", fmt=\"%s\")",
"def write(data):",
"def _serialize_array(self, array):\n buffer = io.BytesIO()\n np.save(buffer, array)\n return buffer.getvalue()",
"def save(data, file, compression=0):\n f = file if isinstance(file, bob.io.base.HDF5File) else bob.io.base.HDF5File(file, 'w')\n if hasattr(data, 'save'):\n data.save(f)\n else:\n f.set(\"array\", data, compression=compression)",
"def data_to_file(data, ta_file):\n file_handle = file(ta_file, \"w\")\n file_handle.write(data_to_string(data))\n file_handle.close()",
"def pickle(array, file):\r\n\timport cPickle\r\n\tfo = open(file,'wb')\r\n\tcPickle.dump(array,fo)\r\n\tfo.close()",
"def create_output_file(arr):\r\n for i in arr:\r\n output_file.write(f'{i[0]}\\t{i[1]}\\n')",
"def save_array(self, name: str, array: np.ndarray):\r\n np.savetxt(self._path_for_csv(name), array, delimiter=\",\")",
"def save_bin(data, file_path):\n np.save(file_path, data)",
"def write( data ):",
"def write_to_file(filepath, data):\n\n with open(filepath, 'w') as f:\n f.write(str(data))"
] |
[
"0.7022295",
"0.66680944",
"0.6556973",
"0.65567935",
"0.6454246",
"0.64344746",
"0.6400705",
"0.6375197",
"0.6342715",
"0.62929696",
"0.6282288",
"0.62203467",
"0.61905587",
"0.61702776",
"0.611481",
"0.61135733",
"0.6070423",
"0.6067515",
"0.6062281",
"0.60030717",
"0.5993181",
"0.5991136",
"0.59821254",
"0.5973174",
"0.59561884",
"0.59361315",
"0.592615",
"0.59243363",
"0.59025913",
"0.5900634"
] |
0.6797532
|
1
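A brief usage sketch for the numpy.ndarray.tofile call used in the answer above, showing the text-versus-binary behaviour the query refers to (the file names are illustrative only):

import numpy as np

data = np.arange(6, dtype=np.float64)

# sep='' (the default) writes raw binary; a non-empty sep writes delimited text.
data.tofile("values.bin")            # binary dump, no shape or dtype metadata
data.tofile("values.csv", sep=",")   # text: "0.0,1.0,2.0,3.0,4.0,5.0"

# Reading back requires the dtype to be known, since tofile stores none of it.
restored = np.fromfile("values.bin", dtype=np.float64)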
|
Get the datetimes from the excel file
|
import csv  # needed by csv.reader; the original snippet assumed it was already imported

def get_datetimes(file_name):
    csv_file = open(file_name, newline='')  # Python 3: text mode; newline='' is what the csv module expects
    file_content = csv.reader(csv_file)
    # ignore header
    next(file_content)  # Python 3 replacement for file_content.next()
    datetimes = []
    for row in file_content:
        datetimes.append(row[0])
    csv_file.close()
    return datetimes
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_dates(file,start,end):\r\n \r\n data = format_data(file)\r\n data = data.loc[start:end,:] \r\n dates = list(data.index)\r\n \r\n return dates",
"def read_hours_from_worksheet(sheet_name):\n workbook_path = get_workbook_path()\n wb = openpyxl.load_workbook(workbook_path)\n ws = wb[sheet_name]\n\n # return a list of the datetime entries\n last_row = str(ws.max_row)\n dates_row = tuple(ws['A9':'A' + last_row])\n date_strings = []\n for row_of_cell_objs in dates_row:\n for date in row_of_cell_objs:\n date_strings.append(date.value)\n\n # return a list of the hours\n last_row = str(ws.max_row)\n hours_row = tuple(ws['B9':'B' + last_row])\n hours_list = []\n for row_of_cell_objs in hours_row:\n for hours in row_of_cell_objs:\n hours_list.append(hours.value)\n return date_strings, hours_list",
"def get_events(path, sheet_index=0, key=\"SF\"):\r\n\r\n wb = xlrd.open_workbook(path)\r\n sheet = wb.sheet_by_index(sheet_index)\r\n events = []\r\n\r\n # Scan the excel file for all cells that contanin the key (\"SF\") and return them\r\n for i in range(sheet.nrows):\r\n for j in range(sheet.ncols):\r\n if (sheet.cell_value(i, j) == 'Date'):\r\n date_row = i\r\n if (sheet.cell_value(i, j) == key):\r\n events.append([sheet.cell_value(i, 0), str(parser.parse(sheet.cell_value(date_row, j)).date())])\r\n\r\n return events",
"def get_raw_datetimes():\n raw_datetimes = []\n with open(RAW_DATETIMES_PATH, 'r') as f:\n for x in f.read().splitlines():\n try:\n raw_datetimes.append(datetime.datetime(year=int(x[1:5]), month=int(x[6:8]), day=int(x[9:11])))\n except ValueError:\n raw_datetimes.append('NA')\n return raw_datetimes",
"def getTimeseries_from_file(self, path, file_type):\n self.path = path\n if not file_type or file_type.lower() not in ['csv', 'pickle']:\n raise ValueError('Either pickle or csv must be true.')\n elif file_type=='pickle':\n temp_dataframe = pd.read_pickle(self.path)\n else:\n temp_dataframe = pd.read_csv(self.path, index_col=0)\n self.dataframe = cleanDates(temp_dataframe.reset_index())\n self.dataframe.set_index(['DateTime'], inplace=True)\n self.sort_df()\n self.last_entry_date = self.dataframe.index.values[-1]\n return self.dataframe",
"def date_list(self):\n if self._date_list is None or self._file_modified:\n with open(self.data_filepath, 'r', newline='') as reader:\n reader = csv.reader(reader)\n self._date_list = [DatePoint.unfreeze(date[0]) for date in reader]\n self._file_modified = False\n return self._date_list",
"def dates(self):\n #{{{ function to return start and end times for a station\n return self.wfdates.keys()",
"def get_exptimes( self ):\n return np.array([h['EXPTIME'] for h in self.headers])",
"def list_dates(product):\n\n if product == 'analysis_assim':\n files = _list_files(product)\n dates = []\n for f in files:\n date = _date_from_filename(f)\n dates.append(date)\n dates = list(set(dates)) # Get unique dates\n else:\n template = (HS_DATA_EXPLORER_URI + 'files_explorer/get-folder-contents'\n '/?selection_path=%2Fprojects%2Fwater%2Fnwm%2Fdata%2F{0}'\n '%3Ffolder&query_type=filesystem')\n if 'long_range' in product:\n product = 'long_range'\n uri = template.format(product)\n response = urlopen(uri).read()\n dates = re.findall(r'\\>([0-9]+)\\<', response)\n return sorted(dates)",
"def parse_data(filename):\n x, y = [], []\n with open(filename) as f:\n reader = csv.reader(f)\n for row in reader:\n x.append(datetime.strptime(row[1], DATE_FORMAT))\n y.append(row[0])\n\n return x, y",
"def Dates(self):\n data = self.DictData()\n dates = [ row[ \"Date\"] for row in data ]\n return dates",
"def _calc_date(time_lines) -> list:\n return time_lines[0].split()[0].strip(\"-\").split(\"-\")",
"def get_arterial(file_path,category):\n book = xlrd.open_workbook(file_path)\n file_name = os.path.basename(file_path)\n year = str(20) + \"\".join([str(s) for s in file_name if s.isdigit()]) ## gets the year from filename\n Month = strptime(file_name[2:5],'%b').tm_mon ## gets month no\n mydate = datetime.date(int(year),Month, 1) ## first day of the month and year\n mydate_1 = mydate - datetime.timedelta(days=1) ## interested in last month of this year as data corresponds to last month and same year\n mydate_2 = mydate - datetime.timedelta(days=368) ## interested in last month of last year as data corresponds to last month and last year \n #monthid1 = str(mydate_1.strftime(\"%Y\")) + str(mydate_1.strftime(\"%m\")) ## 200706 for July 2007 file\n monthid2 = str(mydate_2.strftime(\"%Y\")) + str(mydate_2.strftime(\"%m\")) ## 200606 for July 2007 file\n try:\n if category.lower() == \"rural\":\n index = 3\n elif category.lower() == \"urban\":\n index = 4\n else:\n index = 5\n sheet = book.sheet_by_index(index)\n list_states = sheet.col_values(0)\n xstart = list_states.index('Connecticut')\n xend = list_states.index('TOTALS')\n #list1 = sheet.col_slice(colx= 8,start_rowx=xstart,end_rowx= xend - 1)\n #list1 = [w.value for w in list1]\n list2 = sheet.col_slice(colx= 9,start_rowx=xstart,end_rowx= xend - 1)\n list2 = [w.value for w in list2]\n list3 = sheet.col_slice(colx= 0,start_rowx=xstart,end_rowx= xend - 1)\n list3 = [w.value.lower() for w in list3] ## take lowercase for direct match later\n df = pd.concat([pd.DataFrame(list3),pd.DataFrame(list2)], axis = 1) # ,pd.DataFrame(list1)\n #col_name_1 = category + '_Arterial_' + monthid1\n col_name_2 = category + '_Arterial_' + monthid2\n df.columns = ['State', col_name_2 ] # col_name_1, \n df[col_name_2].replace('', np.nan, inplace=True) ## removes rows with blank records ( zonal categories)\n df['State'].replace('', np.nan, inplace=True)\n curr_monthid = str(mydate.strftime(\"%Y\")) + str(mydate.strftime(\"%m\")) ## 200707 for July 2007 file\n df['data_monthid'] = curr_monthid\n df.dropna(subset=[col_name_2], inplace=True)\n df.dropna(subset=['State'], inplace=True)\n df = df[~df.State.str.contains(\"subtotal\")] ### causes problems on joins, there in most files\n df = df[df.State != \"total\"] ## causes problems on joins, is there only in specific files\n df['State'] = df.State.str.strip() ## removes leading and lagging white spaces if any\n df2 = pd.melt(df,id_vars=['State','data_monthid'],var_name=['category'], value_name='Million_Vehicle_Miles')\n return df2\n except:\n print(\"error in file \",os.path.basename(file_path))",
"def get_only_dates(filename):\n result = []\n with open(filename, \"r\") as file:\n for line in file.readlines():\n if \"-\" not in line:\n continue\n my_line = line.split(\" - \")\n date = my_line[0]\n day, month, year = date.split()\n day = day[:-2]\n if len(day) < 2:\n day = f'0{day}'\n month = get_month_number(month)\n result.append({\n \"date_original\": date,\n \"date_modified\": f\"{day}/{month}/{year}\"\n })\n return result",
"def data(self):\n\n try:\n sheet = load_workbook(self.arquivo, read_only=True)\n act_sheet = sheet.active\n lines = act_sheet.rows\n if self.l1 != 0:\n lines = islice(lines, self.l1, None)\n data = []\n for line in lines:\n if isinstance(self.usecols, tuple):\n content = [line[value].value for value in self.usecols]\n else:\n content = [line[self.usecols].value]\n\n if content[0] is not None:\n data.append(content)\n\n except InvalidFileException:\n book = xlrd.open_workbook(self.arquivo)\n sheet = book.sheet_by_index(0)\n data = []\n for line in range(self.l1, sheet.nrows, 1):\n conteudo = [sheet.row(line)[value].value if isinstance(sheet.row(line)[value].value, float)\n else 0.0 for value in self.usecols]\n data.append(conteudo)\n\n return data",
"def read_spectrograms_excel(filename):\n\n xl = pd.ExcelFile(filename)\n key = xl.sheet_names[0]\n df = pd.read_excel(xl, index_col=0)\n\n if df.index.dtype == pd.Timestamp:\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d %H:%M:%S\")\n\n # Replace _ with \" \"\n key = \" \".join(key.split(\"_\"))\n\n return key, df",
"def get_timestamps( self, raster_pos=None ):\n if raster_pos is None:\n headers = self.time_specific_headers\n else:\n headers = self.get_raster_pos_headers( raster_pos )\n \n return [to_epoch( from_Tformat( h['DATE_OBS'] ) ) for h in headers]",
"def get_arterial(file_path,category):\n book = xlrd.open_workbook(file_path)\n file_name = os.path.basename(file_path)\n year = str(20) + \"\".join([str(s) for s in file_name if s.isdigit()]) ## gets the year from filename\n Month = strptime(file_name[2:5],'%b').tm_mon ## gets month no\n mydate = datetime.date(int(year),Month, 1) ## first day of the month and year\n #mydate_1 = mydate - datetime.timedelta(days=1) ## interested in last month of this year as data corresponds to last month and same year\n mydate_2 = mydate - datetime.timedelta(days=368) ## interested in last month of last year as data corresponds to last month and last year \n #monthid1 = str(mydate_1.strftime(\"%Y\")) + str(mydate_1.strftime(\"%m\")) ## 200706 for July 2007 file\n monthid2 = str(mydate_2.strftime(\"%Y\")) + str(mydate_2.strftime(\"%m\")) ## 200606 for July 2007 file\n try:\n if category.lower() == \"rural\":\n index = 3\n elif category.lower() == \"urban\":\n index = 4\n else:\n index = 5\n sheet = book.sheet_by_index(index)\n list_states = sheet.col_values(0)\n xstart = list_states.index('Connecticut')\n xend = list_states.index('TOTALS')\n #list1 = sheet.col_slice(colx= 6,start_rowx=xstart,end_rowx= xend - 1)\n #list1 = [w.value for w in list1]\n list2 = sheet.col_slice(colx= 7,start_rowx=xstart,end_rowx= xend - 1)\n list2 = [w.value for w in list2]\n list3 = sheet.col_slice(colx= 0,start_rowx=xstart,end_rowx= xend - 1)\n list3 = [w.value.lower() for w in list3] ## take lowercase for direct match later\n df = pd.concat([pd.DataFrame(list3),pd.DataFrame(list2)], axis = 1) # pd.DataFrame(list1),\n #col_name_1 = category + '_Arterial_' + monthid1\n col_name_2 = category + '_Arterial_' + monthid2\n df.columns = ['State', col_name_2 ] ## col_name_1,\n df[col_name_2].replace('', np.nan, inplace=True) ## removes rows with blank records ( zonal categories)\n df['State'].replace('', np.nan, inplace=True)\n curr_monthid = str(mydate.strftime(\"%Y\")) + str(mydate.strftime(\"%m\")) ## 200707 for July 2007 file\n df['data_monthid'] = curr_monthid\n df.dropna(subset=[col_name_2], inplace=True)\n df.dropna(subset=['State'], inplace=True)\n df = df[~df.State.str.contains(\"subtotal\")] ### causes problems on joins, there in most files\n df = df[df.State != \"total\"] ## causes problems on joins, is there only in specific files\n df['State'] = df.State.str.strip() ## removes leading and lagging white spaces if any\n df2 = pd.melt(df,id_vars=['State','data_monthid'],var_name=['category'], value_name='Million_Vehicle_Miles')\n return df2\n except:\n print(\"error in file \",os.path.basename(file_path))",
"def parse_date(self) -> str:\r\n for line in self.lines:\r\n line = ''.join(line)\r\n if 'updated' in line:\r\n index = line.find('Last updated')\r\n if index != -1:\r\n substring = line[index + 10: index + 50].split('.')[0][-13:]\r\n print(substring)\r\n return pd.to_datetime(substring)\r\n if 'Scottish test n' in line:\r\n index_date = line.find('h test n')\r\n print(index_date)\r\n if index_date != -1:\r\n return pd.to_datetime(line[index_date+15:index_date+29])",
"def read_times(self, slices=None):\n times = netCDF4.num2date(\n datetime.strptime(\n self.get_handler().SOURCE_START_DATE.split('.')[0],\n '%Y%m%d%H%M%S'\n )\n )\n return numpy.ma.array([times])",
"def dates(self):\n pass",
"def weather_reader_colab(filename, colab_files):\n data = pd.read_excel(io.BytesIO(colab_files[filename]), \n parse_dates=True, \n index_col='Time measured')\n return data",
"def extract_dates(data):\n dates = []\n \n for line in data.splitlines():\n if line[6:8] == \"20\":\n dates.append(datetime.strptime(line[6:16], '%Y-%m-%d').date())\n \n return list(set(dates))\n pass",
"def read_schedules(use, x):\n # read schedules from excel file\n occ = [x['Weekday_1'].values[:24], x['Saturday_1'].values[:24], x['Sunday_1'].values[:24]]\n el = [x['Weekday_2'].values[:24], x['Saturday_2'].values[:24], x['Sunday_2'].values[:24]]\n dhw = [x['Weekday_3'].values[:24], x['Saturday_3'].values[:24], x['Sunday_3'].values[:24]]\n month = x['month'].values[:12]\n\n if use == \"INDUSTRIAL\":\n pro = [x['Weekday_4'].values[:24], x['Saturday_4'].values[:24], x['Sunday_4'].values[:24]]\n else:\n pro = [np.zeros(24), np.zeros(24), np.zeros(24)]\n\n # read area per occupant\n area_per_occupant = x['density'].values[:1][0]\n\n return occ, el, dhw, pro, month, area_per_occupant",
"def data_reader_colab(filename, colab_files):\n data = pd.read_excel(io.BytesIO(colab_files[filename]), \n parse_dates=True, index_col='Time', \n usecols=range(2))\n return data",
"def FinConvExtractfromPickle(self, pickleLoc):\n df = pickle.load(open(pickleLoc, 'rb'))\n levels = df.dtypes.index.levels\n for l1 in levels[0]:\n for l2 in levels[1]:\n df[l1, l2, 'Date'] = df[l1,l2,'Date'].apply(ConvertExcelTime, convert_dtype=True)\n convertedLoc = os.path.splitext(pickleLoc)[0] + '_convertedDate.p'\n pickle.dump(df, open(convertedLoc, 'wb'))\n return convertedLoc",
"def get_dates(raw_table) -> \"list of dates\":\n dates = []\n found_first = False\n for i, dstr in enumerate([raw_table[i][0] for i in range(0, len(raw_table))]):\n if dstr:\n if len(dstr.split(\"/\")) == 3:\n d = datetime.datetime.strptime(dstr, '%m/%d/%Y')\n elif len(dstr.split(\"-\")) == 3:\n d = datetime.datetime.strptime(dstr, '%Y-%m-%d')\n else:\n # Not necessarily an error, could just be a non-date cell\n logging.debug(\"unknown date-format: {}\".format(dstr))\n continue\n dates.append(d)\n if not found_first:\n found_first = True\n logging.debug(\"Found first date: '{}' at i: {}\".format(d.isoformat(), i))\n elif found_first:\n logging.debug(\"Last date: {}\".format(d))\n break\n return dates",
"def _get_case_dates(self):\n path = \"//path/to/text/text()\"\n return [\n convert_date_string(date_string)\n for date_string in self.html.xpath(path)\n ]",
"def _load_time_series(self, path: str) -> np.ndarray:\n items = []\n previous = None\n for item in sorted(pathlib.Path(path).glob(\"*.nc\")):\n with xr.open_dataset(item) as ds:\n current = ds.ocean_time.values[0].astype(\"datetime64[M]\")\n if (previous is not None\n and (current - previous != np.timedelta64(1, \"M\"))):\n raise ValueError(\"Time series not continuous\")\n items.append((current, str(item)))\n previous = current\n length = max(len(item[1]) for item in items)\n return np.array(\n items,\n dtype={\n \"names\": (\"date\", \"path\"),\n \"formats\": (\"datetime64[M]\", f\"U{length}\"),\n },\n )",
"def as_data(self):\n wb = load_workbook(filename=self.file_path)\n ws = wb.active\n data = []\n\n for i, row in enumerate(ws.rows):\n if len(row) != self.max_columns:\n print(len(row))\n return []\n\n if i == 0:\n # skip file header\n continue\n\n try:\n d = self.get_row_dict(row)\n except Exception:\n return []\n\n data.append(d)\n\n return data"
] |
[
"0.6484449",
"0.5983393",
"0.59567344",
"0.5858225",
"0.5803949",
"0.5792485",
"0.5662788",
"0.56464463",
"0.5638216",
"0.5618037",
"0.5579476",
"0.5578673",
"0.55383044",
"0.5533895",
"0.5525505",
"0.55171365",
"0.55086786",
"0.54955643",
"0.54917186",
"0.5489937",
"0.54851663",
"0.54831266",
"0.5475454",
"0.5462265",
"0.5452194",
"0.54521203",
"0.5431147",
"0.5430281",
"0.5428513",
"0.5395187"
] |
0.63838965
|
1
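The query above asks about an Excel file while the retrieved answer parses a CSV; for an actual .xlsx workbook, a sketch along these lines, assuming openpyxl is available and the datetimes sit in the first column under a header row, would match the wording more closely:

from openpyxl import load_workbook

def get_datetimes_xlsx(file_name):
    # Read the first column of the active sheet, skipping the header row.
    ws = load_workbook(file_name, read_only=True).active
    return [row[0] for row in ws.iter_rows(min_row=2, values_only=True)]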
|
Return RMSLE from the prediction and the expected answer.
|
def get_RMSLE(pred, truth):
assert len(pred) == len(truth)
diff_vect = np.log(pred + 1) - np.log(truth + 1)
diff_sum = np.sum(np.power(diff_vect, 2))
return np.sqrt(diff_sum / len(pred))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def rmsle(actual, predicted, *args, **kwargs):\n return np.sqrt(msle(actual, predicted))",
"def RMSLE(prediction, real):\n logarithmic_error = np.log1p(prediction) - np.log1p(real)\n score = np.sqrt(1/len(real) *np.sum(logarithmic_error**2))\n return score",
"def rmsle(y_true, y_pred):\n assert y_true.shape == y_pred.shape, \\\n ValueError(\"Mismatched dimensions between input vectors: {}, {}\".format(y_true.shape, y_pred.shape))\n return np.sqrt((1/len(y_true)) * np.sum(np.power(np.log(y_true + 1) - np.log(y_pred + 1), 2)))",
"def rmsle(self) -> float:\n return float(np.sqrt(np.mean(np.power(np.log1p(self.predicted) - np.log1p(self.true), 2))))",
"def msle(actual, predicted):\n return np.mean(sle(actual, predicted))",
"def compare_rmse(x_true, x_pred):\n x_true, x_pred = x_true.astype(np.float32), x_pred.astype(np.float32)\n return np.linalg.norm(x_true - x_pred) / (np.sqrt(x_true.shape[0] * x_true.shape[1] * x_true.shape[2]))",
"def calc_rmsle(y: np.ndarray, y_hat: np.ndarray) -> float:\n pass",
"def reserrorcalc(test_set, model):\n # Extracting X\n X = test_set[:,:-1]\n\n # Extracting labels\n Y = test_set[:,-1]\n residual_err = sum((model.predict(X) - Y) ** 2)\n return residual_err",
"def compute_RMSE(true_val, predicted_val, p_output=True) -> float:\n from sklearn.metrics import mean_squared_error\n rms = np.sqrt(mean_squared_error(np.array(true_val), predicted_val))\n if p_output:\n print('RMSE: {0}'.format(rms))\n return rms",
"def sle(actual, predicted):\n return (np.power(np.log(np.array(actual) + 1) -\n np.log(np.array(predicted) + 1), 2))",
"def msle(self, weights=None) -> float:\n return float(np.average((np.log1p(self.true) - np.log1p(self.predicted)) ** 2, axis=0, weights=weights))",
"def rmspe(self) -> float:\n return float(np.sqrt(np.mean(np.square(((self.true - self.predicted) / self.true)), axis=0)))",
"def rmsError(self, yTrue, yPred):\n if len(yPred) != len(yTrue):\n raise ValueError(\"Lengths of predicted and actual values doesn't match.\")\n\n noneCount = 0\n loss = 0\n for i in range(len(yTrue)):\n if yPred[i] == None:\n noneCount+=1\n else:\n loss += (yTrue[i] - yPred[i])**2\n loss = 0.5 * loss/len(yTrue)-noneCount\n return round(math.sqrt(loss), 2)",
"def rmsle_cv(model, dataset,y):\r\n kf = KFold(n_folds, shuffle=True, random_state=42).get_n_splits(dataset)\r\n rmse= np.log(-cross_val_score(model, dataset, y, scoring=\"neg_mean_absolute_error\", cv = kf))\r\n return(rmse)",
"def computeRmse(model, data, n):\n print \"RESULT_data:%s \" % ((data.map(lambda x: (x[0], x[1]))).take(50))\n predictions1 = model.predictAll(data.map(lambda x: (x[0], x[1])))\n print \"RESULT1: %s\" % predictions1\n predictionsAndRatings = predictions1.map(lambda x: ((x[0], x[1]), x[2])) \\\n .join(data.map(lambda x: ((x[0], x[1]), x[2]))) \\\n .values()\n #print \"RESULT2: %s\" % predictions1.take(11)\n return sqrt(predictionsAndRatings.map(lambda x: (x[0] - x[1]) ** 2).reduce(add) / float(n))",
"def msll(Y_true, Y_pred, V_pred, Y_train):\n mt, st = Y_train.mean(), Y_train.std()\n ll = norm.logpdf(Y_true, loc=Y_pred, scale=np.sqrt(V_pred))\n rand_ll = norm.logpdf(Y_true, loc=mt, scale=st)\n msll = - (ll - rand_ll).mean()\n return msll",
"def mle(data):\n\t\"\"\" return (tau, sigma ) \"\"\"\n\tcount_state_state,count_state_word,all_words = counts(data)\n\tsmooth_denom = len(all_words)\n\tsigma = get_sigma(count_state_state)\n\ttau = get_tau(count_state_word, smooth_denom)\n\treturn (tau,sigma)",
"def _compute_rmse(self, data):\n actual = data.rating.values\n pred = self._predict_all(data)\n rmse = np.sqrt(np.sum((actual - pred) **2) /len(pred))\n return rmse",
"def rmse(actual, predicted):\n rms = (actual-predicted)**2\n\n # Returning the sqaure root of the root mean square\n return float(np.sqrt(rms.mean()))",
"def rmse(predicted, actual):\n #maybe make some assertions, assume have same length & in right order\n interm_total = 0\n for i in range(len(predicted)):\n interm_total += (predicted[i] - actual[i]) ** 2\n return sqrt(interm_total / len(predicted))",
"def rmse_metric(actual, predicted):\r\n sum_error = 0.0\r\n for i in range(len(actual)):\r\n prediction_error = predicted[i] - actual[i]\r\n sum_error += (prediction_error ** 2)\r\n mean_error = sum_error / float(len(actual))\r\n return sqrt(mean_error)",
"def rmse(true, predictions):\n true = np.array(true)\n predictions = np.array(predictions)\n return mean_squared_error(true, predictions) ** 0.5",
"def evaluate(self, X_test, y_test):\n y_pred_train = self.pipeline.predict(self.X)\n mse_train = mean_squared_error(self.y, y_pred_train)\n rmse_train = np.sqrt(mse_train)\n \n self.mlflow_log_metric('rmse_train', rmse_train)\n \n y_pred_test = self.pipeline.predict(X_test)\n mse_test = mean_squared_error(y_test, y_pred_test)\n rmse_test = np.sqrt(mse_test)\n self.mlflow_log_metric('rmse_test', rmse_test)\n \n return (round(rmse_train, 3) ,round(rmse_test, 3))",
"def evaluate(self, X_test, y_test):\n \n y_pred = self.pipeline.predict(X_test)\n test_rmse = compute_rmse(y_pred, y_test)\n print(\"test rmse:\", test_rmse)\n return test_rmse",
"def mrr(ground_truth, prediction):\n rr = 0.\n for rank, item in enumerate(prediction):\n if item in ground_truth:\n rr = 1. / (rank + 1)\n break\n return rr",
"def get_r2_score(ground_truth, predicted):\n residual = np.sum(np.square(np.subtract(ground_truth, predicted)))\n print(residual)\n total = np.sum(np.square(np.subtract(ground_truth, np.mean(ground_truth))))\n print(total)\n return np.subtract(1.0, np.divide(residual, (total + 0.00000000001)))",
"def mse(result, expected):\n total_square_sum = 0\n for index1 in range(0, len(result)):\n total_square_sum += (result[index1] - expected[index1]) ** 2\n return total_square_sum / float(len(result))",
"def mse_r2(true, predicted):\n # Reshaping set of images\n # n_imgs, nx, ny = true.shape\n # true = np.reshape(true, (n_imgs, nx*ny))\n # predicted = np.reshape(predicted, (n_imgs, nx*ny))\n nx = 33\n ny = 33\n\n # Compute MSE\n se = np.sum((true - predicted)**2, axis=1)\n mse = se*(nx*ny)**-1\n\n # Compute R squared\n mean = np.mean(true, axis=1)\n r2 = 1 - se*np.sum((true - np.expand_dims(mean, axis=1))**2, axis=1)**-1\n\n return mse, r2",
"def rmse(actual: np.ndarray, predicted: np.ndarray):\n return np.sqrt(np.mean(np.square(_error(actual, predicted))))",
"def rmse(labels, predictions):\n n = len(labels)\n differences = numpy.subtract(labels, predictions)\n return numpy.sqrt(1.0/n * (numpy.dot(differences, differences)))"
] |
[
"0.7609951",
"0.7355589",
"0.7203462",
"0.718389",
"0.6820581",
"0.64459103",
"0.6406715",
"0.6388028",
"0.6317563",
"0.6281459",
"0.6191574",
"0.60744154",
"0.6051396",
"0.6048326",
"0.60155445",
"0.59951115",
"0.59895235",
"0.5938883",
"0.59192955",
"0.5914614",
"0.5897343",
"0.58904225",
"0.5843397",
"0.5823863",
"0.57472736",
"0.5739611",
"0.5714242",
"0.57066935",
"0.57009655",
"0.5688419"
] |
0.7670679
|
0
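For reference, the quantity computed by the get_RMSLE answer above agrees with scikit-learn's mean_squared_log_error after a square root; the data below is illustrative only:

import numpy as np
from sklearn.metrics import mean_squared_log_error

truth = np.array([3.0, 5.0, 2.5, 7.0])
pred = np.array([2.5, 5.0, 4.0, 8.0])

# Both forms evaluate sqrt(mean((log1p(pred) - log1p(truth)) ** 2)).
manual = np.sqrt(np.mean((np.log1p(pred) - np.log1p(truth)) ** 2))
via_sklearn = np.sqrt(mean_squared_log_error(truth, pred))
assert np.isclose(manual, via_sklearn)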
|
Return the path of the Bohrium systemwide configuration file
|
def config_file(self):
return join_path(self.prefix.etc.bohrium, "config.ini")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def getConfigPath():\n if sys.platform == 'linux':\n configpath = os.path.normpath(os.path.expanduser('~/.config/phobos'))\n elif sys.platform == 'darwin':\n configpath = os.path.normpath(os.path.expanduser('~/Library/Application Support/phobos'))\n elif sys.platform == 'win32':\n configpath = os.path.normpath(os.path.expanduser('~/AppData/Roaming/phobos'))\n else:\n configpath = 'ERROR: {0} not supported,'.format(sys.platform)\n return configpath",
"def get_config_file_location():\n\n return './' + CONFIG_FILE_NAME",
"def config_file_and_path():\n return str(rmfriend_dir() / 'config.cfg')",
"def config_path(self):\n if os.path.exists(self._config_path):\n if pyhocon.ConfigFactory.parse_file(self._config_path):\n return os.path.realpath(self._config_path)\n # TODO if string is url/git repo, download file locally first\n return None",
"def get_config_path():\n\n root = os.path.dirname(os.path.abspath(__file__))[:-5]\n config_path = os.path.join(root, 'config.ini')\n\n return config_path",
"def _get_config_path():\n return os.path.join(os.path.expanduser('~'))",
"def get_config_path() -> Path:\n config = os.getenv('TOM_CONFIG', '')\n return Path(config)",
"def configPath(self):\n return os.path.dirname(__file__)",
"def _app_config_file() -> str:\n if 'AISCALATOR_HOME' in os.environ:\n home = os.environ['AISCALATOR_HOME']\n file = os.path.join(home, \"config\", \"aiscalator.conf\")\n if os.path.exists(file):\n return file\n return os.path.join(os.path.expanduser(\"~\"), '.aiscalator',\n 'config', 'aiscalator.conf')",
"def path_config(self):\n return HOMEASSISTANT_CONFIG.format(HASSIO_SHARE_INT)",
"def system_conf_dir(self):\n return buildconfig.SPD_CONF_PATH",
"def get_global_config_path():\n\n return \"/etc/dapsenv/dapsenv.conf\"",
"def path(self):\n return os.path.join(self.config.get('path', os.getcwd()))",
"def cfg_path(self):\n return self._cfg_path",
"def config_path():\n dir_ = os.path.dirname(__file__)\n demo_dir = os.path.join(dir_, '../..')\n return os.path.join(demo_dir, 'mike_dev.ini')",
"def config_file(self):\n return self[CONFIG_FILE_KEY]",
"def get_config_file(self):\r\n return os.path.join(self.cloudletdir, \"applied_config\")",
"def get_config_filepath():\n scs_installation_dirs = _path_utils.get_addon_installation_paths()\n\n # SEARCH FOR CONFIG...\n scs_config_file = ''\n for i, location in enumerate(scs_installation_dirs):\n test_path = os.path.join(location, 'config.txt')\n if os.path.isfile(test_path):\n scs_config_file = test_path\n break\n\n # IF NO CONFIG FILE, CREATE ONE...\n if scs_config_file == '':\n lprint(\"S Creating new 'config.txt' file:\\n\\t %r\", (os.path.join(scs_installation_dirs[0], 'config.txt'),))\n scs_config_file = new_config_file(os.path.join(scs_installation_dirs[0], 'config.txt'))\n\n # print('SCS Blender Tools Config File:\\n \"%s\"\\n' % os.path.join(scs_installation_dirs[0], 'config.txt'))\n return scs_config_file",
"def config_file_address() -> str:\n\n config_files = json_files_from_folder(\"config\")\n config_file = choose_config(config_files) # Choice a config file if there is more then 1 in config folder\n return config_file",
"def _get_config_filepath(self):\n\t\tif self.configfilepath is None:\n\t\t\treturn os.path.join(self.workdir, \"config.txt\")\n\t\telse:\n\t\t\treturn self.configfilepath",
"def getConfigPath():\n\n global args, ConfigPathDefault\n\n if args.config_location:\n return args.config_location;\n return ConfigPathDefault;",
"def get_user_config_path():\n\n return \"{}/.dapsenv/dapsenv.conf\".format(expanduser(\"~\"))",
"def get_config_path(config):\n section = config.sections()[0]\n return Path(config.get(section, \"path\")).expanduser().absolute()",
"def determine_config() -> str:\n if os.environ.get(PortholeConfig.CONFIG_ENV_NAME) is not None:\n return os.environ.get(PortholeConfig.CONFIG_ENV_NAME)\n if os.path.isfile(PortholeConfig.DEFAULT_CONFIG_FILE):\n return PortholeConfig.DEFAULT_CONFIG_FILE\n for file_path in PortholeConfig.OTHER_ALLOWED_CONFIG_PATHS:\n if os.path.isfile(file_path):\n return file_path\n raise FileNotFoundError(\n \"Porthole is unable to locate a useable config file. \"\n \"Try setting the PORTHOLE_CONFIG environment variable, \"\n \"or creating a porthole.ini file in your main project directory.\"\n )",
"def get_default_config_path():\n if os.name == 'posix':\n config_path = os.path.join(os.path.expanduser(\"~\"), '.fpdb')\n elif os.name == 'nt':\n config_path = os.path.join(os.environ[\"APPDATA\"], 'fpdb')\n else: config_path = False\n return config_path",
"def path_config_docker(self):\n return HOMEASSISTANT_CONFIG.format(HASSIO_SHARE_EXT)",
"def default_config_file(self):\n return DEFAULT_CONFIG_FILEPATH",
"def _get_config_fname():\n directory = _get_vispy_app_dir()\n if directory is None:\n return None\n fname = op.join(directory, 'vispy.json')\n if os.environ.get('_VISPY_CONFIG_TESTING', None) is not None:\n fname = op.join(_TempDir(), 'vispy.json')\n return fname",
"def get_cfg_path(filename):\n return os.path.join(get_cfg_dir(), filename)",
"def get_instance_config_path():\n return join(settings.PROJECT_DIR, \"conf\", \"eoxserver.conf\")"
] |
[
"0.7627292",
"0.7395096",
"0.73803973",
"0.72431105",
"0.71532726",
"0.7132535",
"0.71078205",
"0.7089271",
"0.70785385",
"0.70739216",
"0.7015473",
"0.7002854",
"0.69692624",
"0.6962831",
"0.6919271",
"0.6863968",
"0.68277985",
"0.6817259",
"0.6779527",
"0.675732",
"0.67500997",
"0.67495424",
"0.67368305",
"0.6680615",
"0.667474",
"0.66669726",
"0.6665688",
"0.66622084",
"0.66566414",
"0.66180927"
] |
0.8231032
|
0
|
Generate a list of ranks that get harder to obtain as they approach the maximum
|
def generate_ranks(maximum: int, steps: int) -> List[int]:
    ranks = []
    for i in range(steps):
        ranks += [maximum]
        maximum = int(maximum * 0.75)
    RANK_CUTOFFS = list(reversed(ranks))
    return RANK_CUTOFFS
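A minimal usage sketch (the `from typing import List` import is assumed from the annotation); each successive cutoff is 75% of the previous one before the list is reversed, so the returned cutoffs ascend towards the original maximum:

from typing import List

cutoffs = generate_ranks(1000, 4)
print(cutoffs)  # [421, 562, 750, 1000] -- the top rank keeps the original maximum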
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def findRanks(toBeRanked, values):\n\treturn list(map(lambda e: findRank(e, values), toBeRanked))",
"def resolveTie(self, hand_ranking, tie_list):\n max_rank_list = [] \n\n for i in range(5):\n \"\"\" Lowest rank card as baseline \"\"\"\n curr_max_rank = 0 \n for player in tie_list:\n if hand_ranking.player_best_hand_dict[player.name][i].rank > curr_max_rank:\n curr_max_rank = hand_ranking.player_best_hand_dict[player.name][i].rank\n max_rank_list.append(curr_max_rank)\n\n \"\"\" Compare player hands to max_rank_list \"\"\"\n \"\"\" Start with final card and loop towards lowest rank \"\"\"\n for i in range(5-1, -1, -1):\n for player in tie_list:\n if hand_ranking.player_best_hand_dict[player.name][i].rank < max_rank_list[i] and len(tie_list) > 1:\n tie_list.remove(player)\n return tie_list",
"def get_rank_probabilities(n: int) -> List[float]:\n alpha = 3.5\n ranks = [1 / i**alpha for i in range(1, n + 1)]\n\n return [r / sum(ranks) for r in ranks]",
"def get_ranks(d): \n raise NotImplementedError(\"Problem 3 Incomplete\")",
"def findRelativeRanks(self, nums: List[int]) -> List[str]:\n scores = sorted(nums, reverse=True)\n rewards = {}\n for i, score in enumerate(scores):\n if i == 0:\n reward = 'Gold Medal'\n elif i == 1:\n reward = 'Silver Medal'\n elif i == 2:\n reward = 'Bronze Medal'\n else:\n reward = str(i + 1)\n rewards[score] = reward\n return [rewards[score] for score in nums]",
"def get_ranks(w_vector):\n tmp = np.flip(w_vector.argsort())\n ranks = np.empty_like(tmp)\n ranks[tmp] = np.arange(len(w_vector))\n return ranks",
"def _rank(measure):\n sort_idx = np.argsort(-measure)\n ranks = np.empty(len(measure), int)\n ranks[sort_idx] = np.arange(1, len(measure)+1)\n return ranks",
"def recommendation_ranking(self):\n iu = self.final_recommendation_score_matrix()\n new_iu = []\n for row in iu:\n li = []\n temp = row\n if self.product != \"dist\":\n temp = -np.sort(-temp)\n for element in row:\n li.append(binary_search_opp(temp,element)+1) \n else:\n temp = np.sort(temp)\n for element in row:\n li.append(np.searchsorted(temp,element)+1)\n new_iu.append(li)\n return np.array(new_iu)",
"def ring_winners(b, players):\n winners = []\n winrank = ''\n s = [evaluator.evaluate(b, p) for p in players]\n for i, rank in enumerate(s):\n if rank == min(s):\n winners.append(i)\n winrank = evaluator.class_to_string(evaluator.get_rank_class(rank))\n return [winners, winrank]",
"def __rank__(self) -> int:",
"def climbingLeaderboard(scores, alice):\n unique_scores = list({score: None for score in scores}.keys())[::-1]\n ranks = []\n # last_score_index = 0\n for game_score in alice:\n for i, score in enumerate(unique_scores):\n if score > game_score:\n ranks.append(len(unique_scores) - i + 1)\n break\n elif score == game_score:\n ranks.append(len(unique_scores) - i)\n break\n elif i == len(unique_scores) - 1:\n ranks.append(1)\n else:\n continue\n\n return ranks",
"def card_ranks(hand):\n ranks = ['--23456789TJQKA'.index(r) for r, s in hand]\n ranks.sort(reverse = True)\n return [5, 4, 3, 2, 1] if (ranks == [14, 5, 4, 3, 2]) else ranks",
"def _rank(self, ranking, n):\n return nlargest(n, ranking, key=ranking.get)",
"def gen_rank(names, merits):\n if len(names) > 1:\n winner = random_choice(range(len(names)), merits)\n winner_name = names.pop(winner)\n re_scale_factor = 1.0/(1.0-merits.pop(winner))\n merits = [y*re_scale_factor for y in merits]\n return [winner_name] + gen_rank(names, merits)\n else: \n return [names[0]]",
"def _compute_relative_leaderboard_indexes(ranking, size):\n if ranking == 0 or ranking == 1:\n return (0, 5)\n elif ranking == size or ranking == size-1:\n return (max(0, size-5), size)\n else:\n return (max(0, ranking-2), max(size, ranking+3))",
"def rank_teams_of_curr_run(curr_score, curr_ranking):\n for place in curr_ranking:\n curr_place = get_key_with_max_value(curr_score)\n curr_ranking[place] = curr_ranking[place].__add__([curr_place])\n curr_score.pop(curr_place)\n return curr_ranking",
"def get_rank(score):\n if score in range(0, 500):\n return RANKTYPES[0]\n elif score in range(500, 1500):\n return RANKTYPES[1]\n elif score in range(1500, 2000):\n return RANKTYPES[2]\n elif score in range(2000, 2500):\n return RANKTYPES[3]\n elif score in range(2500, 3000):\n return RANKTYPES[4]\n elif score in range(3000, 4000):\n return RANKTYPES[5]\n elif score in range(4000, 5500):\n return RANKTYPES[6]\n elif score > 5500:\n return RANKTYPES[7]",
"def _rank(self):\r\n return sorted(self.player_points.items(),key=lambda x:x[1],reverse=True)",
"def questionScores():\n rank = [1,2,3,4]\n scores = \"\"\n for x in range(4):\n rand = random.randint(1, 4)\n while rank[rand-1] == 0:\n rand = random.randint(1,4)\n scores += str(rank[rand-1])\n rank[rand-1] = 0\n return scores",
"def rank():\n return 0",
"def card_ranks(cards):\n ranks = [\"--23456789TJQKA\".index(r) for r,s in cards] # Each card contains a rank and a suit, hand/cards == [(11, 'Q'), (9, 'D')] \n # Using a \"Rank Strings Array\" (i.e using an array to represent the rank strings) to index it for the ranks\n ranks.sort(reverse=True)\n return [5, 4, 3, 2, 1] if (ranks == [14, 5, 3, 2, 1]) else ranks",
"def hand_rank(hand):\n ranks = card_ranks(hand) # ranks is a list of all the ranks. A sorted list of ranks is returned\n if straight(hand) and flush(hand): # Straight flush\n return (8, max(ranks)) # 2 3 4 5 6 (8, 6) 6 7 8 9 T (8, 10)\n elif kind(4, ranks): # Here kind(4, ranks) is used to return a bolean value\n # kind(4, ranks) returns the int when true, returns false if not true (used as boolean)\n return (7, kind(4, ranks), kind(1, ranks)) # 9 9 9 9 3 (7, 9, 3) 9 9 9 9 5 (7, 9, 5)\n elif kind(3, ranks) and kind(2, ranks): # full house\n return (6, kind(3, ranks), kind(2, ranks))\n elif flush(hand): # flush\n return (5, ranks)\n elif straight(ranks): # straight\n return (4, max(ranks))\n elif kind(3, ranks): # 3 of a kind\n return (3, kind(3, ranks), ranks)\n elif two_pair(ranks): # 2 pair\n return (2, two_pair(ranks), ranks)\n elif kind(2, ranks): # kind\n return (1, kind(2, ranks), ranks)\n else: # high card\n return (0, ranks)",
"def findRelativeRanks(nums):\n compare_lst = copy.deepcopy(nums)\n compare_lst.sort(reverse=True)\n for i in nums:\n compare_index = compare_lst.index(i)\n nums_index = nums.index(i)\n if compare_index > 2:\n nums[nums_index] = str(compare_index + 1)\n elif compare_index == 0:\n nums[nums_index] = 'Gold Medal'\n elif compare_index == 1:\n nums[nums_index] = 'Silver Medal'\n else:\n nums[nums_index] = 'Bronze Medal'\n return nums",
"def hand_rank(hand):\n ranks = card_ranks(hand)\n if straight(ranks) and flush(hand):\n return (8, max(ranks))\n elif kind(4, ranks):\n return (7, kind(4, ranks), kind(1, ranks))\n elif kind(3, ranks) and kind(2, ranks):\n return (6, kind(3, ranks), kind(2, ranks))\n elif flush(hand):\n return (5, ranks)\n elif straight(ranks):\n return (4, max(ranks))\n elif kind(3, ranks):\n return (3, kind(3, ranks), ranks)\n elif two_pair(ranks):\n return (2, two_pair(ranks), ranks)\n elif kind(2, ranks):\n return (1, kind(2, ranks), ranks)\n else:\n return (0, ranks)",
"def assignRanks(self):\r\n\t\trank = 0\r\n\t\tscores = list(self._playerScores)\r\n\t\tscores.reverse()\r\n\t\tfor playerScore in scores:\r\n\t\t\tif not playerScore.has(NOT_MET) or not playerScore.value(NOT_MET):\r\n\t\t\t\trank += 1\r\n\t\t\t\tplayerScore.set(RANK, smallText(BugUtil.colorText(u\"%d\" % rank, ScoreOpt.getRankColor())))\r\n\t\tif rank > 0:\r\n\t\t\tself._anyHas[RANK] = True",
"def climbingLeaderboard(scores, alice):\n\n # unique scores\n scores = sorted(list(set(scores))) # asc\n player_ranks = []\n idx = 0\n n = len(scores)\n\n for alice_score in alice: # alice in asc order\n \n # Find the rank. For next alice score (which is not smaller), continue from the same index\n while (n > idx and alice_score >= scores[idx]):\n idx += 1\n\n player_ranks.append(n+1-idx)\n\n return player_ranks",
"def ranks_from_scores(scores, rank_gap=1e-15):\n prev_score = None\n rank = 0\n for i, (key, score) in enumerate(scores):\n try:\n if abs(score - prev_score) > rank_gap:\n rank = i\n except TypeError:\n pass\n\n yield key, rank\n prev_score = score",
"def abilityScores():\n\n scores_list = []\n\n for i in range(6):\n temp_list = []\n for j in range(4):\n temp_list.append(r.choice([1,2,3,4,5,6]))\n temp_list.sort()\n scores_list.append(temp_list[1]+temp_list[2]+temp_list[3])\n scores_list.sort()\n return scores_list",
"def get_all_rankings(session: CondorSession) -> List[sc.Ranking]:\n return [sc.Ranking(matrix) for matrix in RankingMatrix.list(session)]",
"def recommend_from_scores(scores: List[List[float]], n: int) -> List[List[int]]:\n\n def top_idx(scores):\n return np.array(scores).argsort()[::-1][:n]\n\n return [top_idx(s) for s in scores]"
] |
[
"0.673976",
"0.67047715",
"0.6614033",
"0.6538433",
"0.6468819",
"0.63788843",
"0.63333786",
"0.63292336",
"0.62728506",
"0.62722045",
"0.62637275",
"0.6252744",
"0.6246774",
"0.6220911",
"0.621737",
"0.6209077",
"0.618397",
"0.6162317",
"0.6145225",
"0.6116494",
"0.61029476",
"0.60788256",
"0.6077375",
"0.6055757",
"0.60540545",
"0.60349864",
"0.6001205",
"0.5992214",
"0.59857064",
"0.5953704"
] |
0.76253545
|
0
|
Get the rank for a given number of points
|
def get_rank(points: int, cutoffs: List[int]) -> int:
    rank = 0
    for i, cutoff in enumerate(cutoffs):
        if points < cutoff:
            if i == 0:
                break
            else:
                rank = i - 1
                break
    else:
        # for-else: points exceed every cutoff, so assign the highest rank.
        # RANK_COUNT is assumed to be a module-level constant (the number of ranks).
        rank = RANK_COUNT - 1
    return rank
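A hedged sketch of how the two helpers above presumably fit together; taking RANK_COUNT equal to the number of cutoffs is an assumption made for illustration:

RANK_COUNT = 4
cutoffs = generate_ranks(1000, RANK_COUNT)   # [421, 562, 750, 1000]
get_rank(300, cutoffs)    # 0 -- below the lowest cutoff
get_rank(600, cutoffs)    # 1 -- between the 562 and 750 cutoffs
get_rank(2000, cutoffs)   # 3 -- above every cutoff, i.e. RANK_COUNT - 1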
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_rank(self, points):\n sql_command = \"SELECT * FROM points WHERE amount > ?;\"\n cursor, connection = self.execute_command_get_connection(sql_command, [points])\n\n all = cursor.fetchall()\n cursor.close()\n connection.close()\n return len(all) + 1",
"def rank():\n return 0",
"def _get_rank(self,fitness):\n # infact you can get the order or rank by only once sort.\n rank=fitness[:,0].argsort().argsort() # [n]\n return rank",
"def _rank(self, ranking, n):\n return nlargest(n, ranking, key=ranking.get)",
"def rank(self):\n return self.lib.calculate_rank()",
"def _rank(self, ranking, n):\n return nlargest(n, ranking, key=ranking.get)",
"def get_ranked_points(zpoints, dsq):\n pos_map = calc_positions(zpoints, dsq)\n rpoints = calc_ranked_points(pos_map, dsq)\n return rpoints",
"def rank() -> int:\n return dist.get_rank() if dist.is_initialized() else 0",
"def get_rank(self) -> int:\r\n return self.rank",
"def get_rank(self) -> int:\n return dist.get_rank()",
"def _rank(self):\r\n return sorted(self.player_points.items(),key=lambda x:x[1],reverse=True)",
"def __rank__(self) -> int:",
"def rank(self):\n rank = 0\n rho = self.array_form[:]\n n = self.size - 1\n size = n + 1\n psize = int(ifac(n))\n for j in xrange(size - 1):\n rank += rho[j]*psize\n for i in xrange(j + 1, size):\n if rho[i] > rho[j]:\n rho[i] -= 1\n psize //= n\n n -= 1\n return rank",
"def get_rank() -> int:\n return collective.get_rank()",
"def get_ranks(d): \n raise NotImplementedError(\"Problem 3 Incomplete\")",
"def points(self):\r\n\t\tif self.rank() in self.point_sysm:\r\n\t\t\treturn self.point_sysm[self.rank()]\r\n\t\telse:\r\n\t\t\treturn (self.rank() + 2)",
"def get_num_hit_rank(boxes_truth, boxes_pred, rank):\n\n def is_hit(box_truth, box_pred):\n return is_label_match_rank(box_truth, box_pred, rank)\n\n return get_num_hit(boxes_truth, boxes_pred, is_hit)",
"def prufer_rank(self):\n r = 0\n p = 1\n for i in range(self.nodes - 3, -1, -1):\n r += p*self.prufer_repr[i]\n p *= self.nodes\n return r",
"def get_rank(self, score, answer, entities_space, num_ent):\n if answer not in entities_space:\n rank = num_ent\n else:\n answer_prob = score[entities_space.index(answer)]\n score.sort(reverse=True)\n rank = score.index(answer_prob) + 1\n return rank",
"def get_rank(self):\r\n return self.rank",
"def get_hs_rank(self, points):\n p = points\n rank = self.RANKS\n while p > 0 and rank > 0:\n p = p - self.BASE * math.pow(self.FACTOR, (self.RANKS - rank))\n rank = rank - 1\n\n if rank > 0:\n return str(rank)\n else:\n return str(self.get_rank(points)) + \" Legend\"",
"def get_rank(self):\n return self.rank",
"def get_rank(self):\n return int(self._rank)",
"def points(self):\r\n\t\tif self.rank() >= 9:\r\n\t\t\treturn self.point_sysm[self.rank()]\r\n\t\telse:\r\n\t\t\treturn 0",
"def rank(self):\r\n\t\trank = self.n % 13\r\n\t\treturn rank",
"def getRank(self):\r\n return self.rank",
"def get_rank(self):\n return self.__rank",
"def get_rank(self, pb):\n\n for rank in self.RANKS:\n start = self.RANKS[rank][\"ProgressStart\"]\n # 1 is not subtracted as we're calling range\n end = start + self.RANKS[rank][\"Progress\"]\n if pb in range(start, end):\n return int(rank)\n else:\n return 35",
"def rank_in_club(user, club):\n posel_ids = [p.id for p in club.posel_set.all()]\n return rank(user, posel_ids)",
"def rank(self) -> tskit.Rank:\n return combinatorics.RankTree.from_tsk_tree(self).rank()"
] |
[
"0.77205133",
"0.7398081",
"0.7045258",
"0.70051664",
"0.6996832",
"0.6941575",
"0.6934668",
"0.6901205",
"0.6886502",
"0.6873956",
"0.68593204",
"0.68586534",
"0.6857211",
"0.684011",
"0.68172395",
"0.68114007",
"0.68016917",
"0.6763181",
"0.6714917",
"0.6704349",
"0.6667565",
"0.66063935",
"0.6605187",
"0.6545122",
"0.65383613",
"0.65299815",
"0.64528286",
"0.64409536",
"0.64391875",
"0.64135253"
] |
0.76279044
|
1
|
moves the target further out as a % of the screen
|
def move_target(self, distance_adjustment):
    self.x = float(self.screen_rect.right - self.width)
    self.x = self.x * distance_adjustment
    self.rect.x = self.x
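A minimal, hypothetical pygame context for the method above; the attribute names (screen_rect, width, rect, x) come from the snippet, everything else is an assumption for illustration only:

import pygame

class Target(pygame.sprite.Sprite):
    def __init__(self, screen):
        super().__init__()
        self.screen_rect = screen.get_rect()          # bounds of the game window
        self.width, self.height = 30, 60              # placeholder target size
        self.rect = pygame.Rect(0, 0, self.width, self.height)
        self.x = float(self.rect.x)

# target.move_target(0.75) would then place the target's left edge at 75% of the
# distance between the screen's left edge and the rightmost position where it still fits.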
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def assign_upLimit():\r\n player.rect.y = 25",
"def assign_downLimit():\r\n player.rect.y = 100",
"def update_target(self):\n\t\tself.check_top()\n\t\tself.check_bottom()\n\t\tself.update()\n\t\tself.screen.fill(self.target_color, self.rect)",
"def move(self):\n if random.random() < 0.5:\n self.y = (self.y + 1) % 100\n else:\n self.y = (self.y - 1) % 100\n if random.random() < 0.5:\n self.x = (self.x + 1) % 100\n else:\n self.x = (self.x - 1) % 100",
"def update(self):\n\t\tself.y += (self.settings.target_speed * self.target_direction)\n\t\tself.rect.y = self.y",
"def move_finger5(percent):\n percent = _clamp_percent(percent)\n _send_request(f5=percent)",
"def move(self):\n self.val = (pygame.mouse.get_pos()[\n 0] - self.xpos - 10) / 80 * (self.maxi - self.mini) + self.mini\n if self.val < self.mini:\n self.val = self.mini\n if self.val > self.maxi:\n self.val = self.maxi",
"def _walk(self):\n new_pos = self.rect.move((self.move, 0)) # move 9 pixel to the right per frame\n if self.rect.left < self.area.left or self.rect.right > self.area.right:\n self.move = -self.move # move to the opposite direction when the chimp position exceeds the screen\n new_pos = self.rect.move((self.move, 0))\n self.image = pygame.transform.flip(\n self.image, 1, 0\n ) # mirror the chimp to make it looks like turning around\n self.rect = new_pos",
"def head_towards(self):\n dest = self.target_destination - self.location\n if dest.length() != 0:\n dest.scale_to_length(self.speed)\n dest.normalize()\n self.rect.left += dest.x\n self.rect.top += dest.y",
"def move_finger4(percent):\n percent = _clamp_percent(percent)\n _send_request(f4=percent)",
"def move(self):\r\n min_x = self.__screen.SCREEN_MIN_X\r\n min_y = self.__screen.SCREEN_MIN_Y\r\n delta_x = self.__screen.SCREEN_MAX_X - min_x\r\n delta_y = self.__screen.SCREEN_MAX_Y - min_y\r\n\r\n # new location formula according to pdf.\r\n new_x = (self.__x_speed + self.__x - min_x) % delta_x + min_x\r\n new_y = (self.__y_speed + self.__y - min_y) % delta_y + min_y\r\n self.__x, self.__y = new_x, new_y",
"def move_finger2(percent):\n percent = _clamp_percent(percent)\n _send_request(f2=percent)",
"def move_to_start(self):\n self.pos = (SCREEN_WIDTH / 2, SCREEN_HEIGHT - 64)",
"def move_finger1(percent):\n percent = _clamp_percent(percent)\n _send_request(f1=percent)",
"def move_up(self):\n if self.center.y < (self.screen_height - (self.height / 2)):\n self.center.y += 5",
"def set_fan_target(self, target_percent):\n self.__fan_target = target_percent\n self.fan_speed_dac.set_output_scaled(1.0 - (target_percent / 100.0))",
"def assign_rightLimit():\r\n player.rect.x = WIDTH - 75",
"def move(self, max_width):\n if self.x_pos <= 20:\n self.SPEED = abs(self.SPEED)\n elif self.x_pos >= max_width - 40:\n self.SPEED = -abs(self.SPEED)\n self.x_pos += self.SPEED",
"def move(self):\n\n if self.rect.right >= SCREEN_WIDTH:\n self.rect.right = SCREEN_WIDTH\n elif self.rect.left <= 0:\n self.rect.left = 0\n #elif self.rect.right < SCREEN_WIDTH and self.rect.left: \n \n self.rect.move_ip(self.speed_p,0)",
"def move(self):\n\n if self.range > 0:\n self.dirty = 1\n self.rect.move_ip([self.x * self.speed, self.y * self.speed])\n self.range -= self.speed\n else:\n self.kill()",
"def move_finger3(percent):\n percent = _clamp_percent(percent)\n _send_request(f3=percent)",
"def update(self):\n \n self.rect.x += self.change_x\n self.rect.y += self.change_y\n \n if self.rect.x < 0:\n self.rect.x = 0\n if self.rect.x > screen_width - 60:\n self.rect.x = screen_width - 60\n if self.rect.y < 0:\n self.rect.y = 0 \n \n if self.rect.y > screen_height - 60:\n self.rect.y = screen_height - 60",
"def moveBasedOnCurrentMomentum(self):\n self.xPos-=self.xMomentum\n self.yPos-=self.yMomentum\n self.syncSpriteCoordinates()",
"def jump(self):\n self.vy = -9",
"def move_down(self):\r\n if self.rect.bottom < BG_HEIGHT - 60:\r\n self.rect.top += self.speed",
"def move_to(self, new_pos, pass_go=True):\r\n new_pos = new_pos % 40\r\n if self.pos > new_pos and pass_go:\r\n self.money += 200\r\n self.pos = new_pos",
"def move_up(self):\r\n if self.rect.top > 0:\r\n self.rect.top -= self.speed",
"def move_down(self):\n self.y -= 1",
"def move_down(self):\n if self.center.y > (self.height / 2):\n self.center.y -= 5",
"def move_up(self):\n self.move_measurement(-1)"
] |
[
"0.66254646",
"0.65339065",
"0.65005153",
"0.64133626",
"0.6320612",
"0.6276562",
"0.6270425",
"0.62666744",
"0.62145644",
"0.6178224",
"0.6135446",
"0.61301184",
"0.6129795",
"0.61164856",
"0.6073802",
"0.6068467",
"0.6010894",
"0.6008223",
"0.5989418",
"0.5989335",
"0.5984468",
"0.5976463",
"0.5954948",
"0.59352046",
"0.5934136",
"0.59162384",
"0.5881952",
"0.587772",
"0.5869929",
"0.5869735"
] |
0.68412036
|
0
|
Checks top to target to see if it hit top of screen
|
def check_top(self):
    if self.rect.top <= 0:
        self.target_direction = 1
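For symmetry, a hedged sketch of the matching bottom check that an update_target helper would call alongside check_top; the screen_rect attribute and the sign convention (+1 moves down, -1 moves up) are assumptions mirroring the snippet:

def check_bottom(self):
    # Assumed mirror of check_top: reverse direction at the bottom edge.
    if self.rect.bottom >= self.screen_rect.bottom:
        self.target_direction = -1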
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _check_autos_top(self):\n\t\tscreen_rect = self.screen.get_rect()\n\t\tfor auto in self.autos.sprites():\n\t\t\tif auto.rect.top <= screen_rect.top:\n\t\t\t\t# Treat this the same as if the pigeon got hit.\n\t\t\t\tself._pigeon_hit()\n\t\t\t\tbreak",
"def hits_top_or_bottom(self):\n if self.y >= self.scene.screen.get_height() - self.image.get_height() or self.y <= 0:\n return True\n else:\n return False",
"def isTop(self):\n return self.top",
"def did_collide_top_bottom(self):\n\n y_coord = self.getY()\n return y_coord < 0 or (y_coord + self.ball_size[1]) > Configuration.windowHeight",
"def top_visible(self) -> bool:\n return self.vertical_scroll == 0",
"def check_in_screen(self):\n if self.rect.colliderect(screen_rect) and not self.moving:\n return True\n return False",
"def is_target(self):\n\t\treturn self.window and self.window.target is self",
"def at_target(self):\n return self.location == self.target_location",
"def check_edges(self):\n\t\tbottom_screen_limit = 2 * self.rect.height\n\t\tscreen_rect = self.screen.get_rect()\n\t\tif (self.rect.top <= 100) or (self.rect.bottom >= self.screen_rect.bottom):\n\t\t#self.rect.bottom >= self.screen_rect.bottom:\n\t\t\treturn True",
"def is_target(top_container):\n\tif '.' not in top_container.get('barcode', ''):\n\t\treturn True\n\telse:\n\t\treturn False",
"def check_ball_on_target():\n\n pass",
"def always_top(self) -> bool:\n return bool(self.tk_ref.wm_attributes('-topmost'))",
"def update_target(self):\n\t\tself.check_top()\n\t\tself.check_bottom()\n\t\tself.update()\n\t\tself.screen.fill(self.target_color, self.rect)",
"def IsTopSnappable(self):\r\n \r\n return self.HasFlag(self.optionTopSnapped)",
"def testPsychOnTop(self):\n attr = self.session.create_visit_attr()\n\n self.util.intTypeTest(self, attr, \"on_top\")\n\n self.util.intPropertyTest(self, attr, \"on_top\")",
"def HasGripperTop(self):\r\n\r\n return self.HasFlag(self.optionGripperTop)",
"def check_edges(self):\n\t\tscreen_rect = self.screen.get_rect()\n\t\tif self.rect.bottom >= screen_rect.bottom or self.rect.top <= -1:\n\t\t\treturn True",
"def check_for_top(self) -> bool:\n\t\tboolean_expression_has_top = False\n\t\texpression_has_top = False\n\t\tif self.boolean_expression:\n\t\t\tboolean_expression_has_top = self.boolean_expression.check_for_top()\n\t\tif self.expression:\n\t\t\texpression_has_top = self.expression.check_for_top()\n\t\treturn boolean_expression_has_top or expression_has_top",
"def checkBottom(self):\n exposed = True\n for sprite in self.overlapping_sprites:\n if sprite not in self.game.neutrinos:\n a = abs(self.bottom - sprite.top)\n b = abs(self.top - sprite.bottom)\n c = abs(self.left - sprite.right)\n d = abs(self.right - sprite.left)\n if a < b and a < c and a < d:\n exposed = False\n break\n return exposed",
"def top(self):\n if self.goals:\n return self.goals[self.stack[-1]]\n else:\n return False",
"def sees_home_tag(self):\n detections = self.swarmie.get_latest_targets().detections\n\n for detection in detections:\n if detection.id == 256:\n return True\n\n return False",
"def HitTest(self, x, y):\r\n\r\n if self.target.GetScreenRect().Contains((x, y)):\r\n return wx.ALL\r\n\r\n return -1",
"def check_off_screen(self):\r\n for bullet in self.bullets:\r\n if bullet.is_off_screen(SCREEN_WIDTH, SCREEN_HEIGHT):\r\n self.bullets.remove(bullet)\r\n\r\n for target in self.targets:\r\n if target.is_off_screen(SCREEN_WIDTH, SCREEN_HEIGHT):\r\n self.targets.remove(target)\r\n # if standard and strong target off the screen, it loses 1 point. Otherwise, it remains the score\r\n if not (target.type == \"Bonus\" or target.type == \"Safe\"):\r\n self.score -= 1",
"def outOfScreen(self):\n x,y = self.currentLevel.transformToScreenCoordinate(self.position)\n w,h = cblocals.GAME_SCREEN_SIZE\n if x<0 or y<0 or x>x or y>h:\n return True\n return False",
"def has_target(self):\n return self.target is not None",
"def _test_display_up_button(self):\n return (self.product_displays.top_index > 0)",
"async def is_target_reached(self) -> bool: # type: ignore\n ...",
"def canStack(bottom, top):\n bw, bh, bd = bottom\n tw, th, td = top\n return (bw < tw) and (bh < th) and (bd < td)",
"def need_target(self):\n\t\t# if we search for color targets, we are using cpu and potentially throwing the copter off the real target\n\t\t# can make this decision more complex if needded\n\t\tdelta_t = time() - self.t_last_seen\n\t\ttime_to_see = delta_t > .6 # arbitrary time threshold over which we should probably look for color targets\n\t\t\n\t\treturn time_to_see",
"def _isInScreen(self, pos):\n if type(pos) is Vec2:\n return pos.y >= 0 and pos.y <= self.screenSize[1] and pos.x >= 0 \\\n and pos.x <= self.screenSize[0]\n\n return pos[1] >= 0 and pos[1] <= self.screenSize[1] and pos[0] >= 0 \\\n and pos[0] <= self.screenSize[0]"
] |
[
"0.733528",
"0.73351",
"0.6623284",
"0.6466618",
"0.6390728",
"0.62321746",
"0.6225023",
"0.62215257",
"0.6199803",
"0.61606985",
"0.6129186",
"0.610839",
"0.6086093",
"0.6061291",
"0.6013783",
"0.598041",
"0.5974214",
"0.5962553",
"0.58782667",
"0.58218175",
"0.58168215",
"0.5798178",
"0.57614124",
"0.57439274",
"0.5715913",
"0.5681475",
"0.56807196",
"0.56783926",
"0.5660252",
"0.56162345"
] |
0.8144298
|
0
|
Setup strategies to use by the validator. These strategies can be provided
|
def _using(*args, validator: "DictValidator") -> "DictValidator":
    def setup_strategy(validator, strategy) -> "DictValidator":
        if isinstance(strategy, SortingStrategy):
            validator.sorting = strategy
        elif isinstance(strategy, FilteringStrategy):
            validator.filtering = strategy
        elif isinstance(strategy, PrintingStrategy):
            validator.printing = strategy
        else:
            raise CertumException("The strategy provided for the validator is unknown.")
        return validator

    for arg in args:
        if isinstance(arg, list):
            for strategy in arg:
                validator = setup_strategy(validator, strategy)
        elif isinstance(arg, Strategy):
            validator = setup_strategy(validator, arg)
        else:
            raise CertumException("The strategy provided for the validator is unknown.")
    return validator
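A hedged illustration of the calling convention implied by the loop above; the strategy instances are placeholders, since only the abstract types (Strategy, SortingStrategy, FilteringStrategy, PrintingStrategy) appear in the snippet:

# Strategies may be passed one by one or grouped in a list (placeholder names):
# validator = _using(my_sorting, validator=validator)
# validator = _using([my_sorting, my_filtering, my_printing], validator=validator)
# Anything that is neither a Strategy nor a list of strategies raises CertumException.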
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def set_default_strategies(self, fleetmanager_strategy, transport_strategy, customer_strategy, directory_strategy,\n station_strategy):\n self.fleetmanager_strategy = load_class(fleetmanager_strategy)\n self.transport_strategy = load_class(transport_strategy)\n self.customer_strategy = load_class(customer_strategy)\n self.directory_strategy = load_class(directory_strategy)\n self.station_strategy = load_class(station_strategy)\n logger.debug(\"Loaded default strategy classes: {}, {}, {}, {} and {}\".format(self.fleetmanager_strategy,\n self.transport_strategy,\n self.customer_strategy,\n self.directory_strategy,\n self.station_strategy))",
"def initialize_location_strategies(self):\n locator_manager.register_locators(\"sf\", lex_locators)\n locator_manager.register_locators(\"text\", \"Salesforce.Locate Element by Text\")\n locator_manager.register_locators(\"title\", \"Salesforce.Locate Element by Title\")\n\n # This does the work of actually adding all of the above-registered\n # location strategies, plus any that were registered by keyword\n # libraries.\n locator_manager.add_location_strategies()",
"def set_strategies(players, strategies):\n if players.num_players != len(strategies):\n raise ValueError(\"len(strategies) must equal num_players\")\n for player, strategy in zip(players.tuple_, strategies):\n player.play = MethodType(strategy, player, Player)",
"def test_unexpected_strategy():\n assert strategies == {\n 'css': FindByCss,\n 'xpath': FindByXPath,\n 'tag': FindByTag,\n 'name': FindByName,\n 'text': FindByText,\n 'id': FindById,\n 'value': FindByValue,\n }",
"def __init__(self):\n self.strategy = Strategy(self)",
"def strategy(func):\n strategies.append(func)\n return func",
"def init_default_strategies() -> None:\n register_string_format(\"binary\", st.binary())\n register_string_format(\"byte\", st.binary().map(lambda x: b64encode(x).decode()))\n\n def make_basic_auth_str(item: Tuple[str, str]) -> str:\n return _basic_auth_str(*item)\n\n latin1_text = st.text(alphabet=st.characters(min_codepoint=0, max_codepoint=255))\n\n register_string_format(\"_basic_auth\", st.tuples(latin1_text, latin1_text).map(make_basic_auth_str)) # type: ignore\n register_string_format(\"_bearer_auth\", st.text().map(\"Bearer {}\".format))",
"def generate():\n strategies = []\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy4))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # Add Iocaine Powder strategy\n strategies.extend(\n generate_meta_strategy_pair(IocanePowderStrategy))\n\n # Add are you a lucker strategies\n strategies.extend(\n generate_meta_strategy_pair(AreYouALuckerStrategy))\n\n # Add Greenberg strategies\n strategies.extend(\n generate_meta_strategy_pair(GreenbergStrategy))\n\n # Add RPS Meta Fix strategies\n strategies.extend(\n generate_meta_strategy_pair(RPSMetaFixStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations",
"def strategy(self, strategy):\n\n if strategy == \"auto\":\n error = dll.wasmtime_config_strategy_set(self.__ptr__, 0)\n elif strategy == \"cranelift\":\n error = dll.wasmtime_config_strategy_set(self.__ptr__, 1)\n elif strategy == \"lightbeam\":\n error = dll.wasmtime_config_strategy_set(self.__ptr__, 2)\n else:\n raise WasmtimeError(\"unknown strategy: \" + str(strategy))\n if error:\n raise WasmtimeError.__from_ptr__(error)",
"def setup(cls):\n cls.location = {\"longitude\": 0.1270, \"latitude\": 51.5194}\n cls.search_query = {\n \"search_key\": \"intro_service\",\n \"search_value\": \"intro_alice\",\n \"constraint_type\": \"==\",\n }\n cls.search_radius = 5.0\n cls.admin_host = \"127.0.0.1\"\n cls.admin_port = 8021\n cls.ledger_url = \"http://127.0.0.1:9000\"\n config_overrides = {\n \"models\": {\n \"strategy\": {\n \"args\": {\n \"location\": cls.location,\n \"search_query\": cls.search_query,\n \"search_radius\": cls.search_radius,\n \"admin_host\": cls.admin_host,\n \"admin_port\": cls.admin_port,\n \"ledger_url\": cls.ledger_url,\n }\n }\n },\n }\n\n super().setup(config_overrides=config_overrides)\n\n # behaviours\n cls.faber_behaviour = cast(\n FaberBehaviour,\n cls._skill.skill_context.behaviours.faber,\n )\n\n # dialogues\n cls.default_dialogues = cast(\n DefaultDialogues, cls._skill.skill_context.default_dialogues\n )\n cls.http_dialogues = cast(\n HttpDialogues, cls._skill.skill_context.http_dialogues\n )\n cls.oef_search_dialogues = cast(\n OefSearchDialogues, cls._skill.skill_context.oef_search_dialogues\n )\n\n # handlers\n cls.http_handler = cast(HttpHandler, cls._skill.skill_context.handlers.http)\n cls.oef_search_handler = cast(\n OefSearchHandler, cls._skill.skill_context.handlers.oef_search\n )\n\n # models\n cls.strategy = cast(Strategy, cls._skill.skill_context.strategy)\n\n cls.logger = cls._skill.skill_context.logger\n\n # mocked objects\n cls.mocked_method = \"SOME_METHOD\"\n cls.mocked_url = \"www.some-url.com\"\n cls.mocked_version = \"some_version\"\n cls.mocked_headers = \"some_headers\"\n cls.body_dict = {\"some_key\": \"some_value\"}\n cls.body_str = \"some_body\"\n cls.body_bytes = b\"some_body\"\n cls.mocked_body_bytes = json.dumps(cls.body_str).encode(\"utf-8\")\n cls.mocked_query = Query(\n [Constraint(\"some_attribute_name\", ConstraintType(\"==\", \"some_value\"))],\n DataModel(\n \"some_data_model_name\",\n [\n Attribute(\n \"some_attribute_name\",\n str,\n False,\n \"Some attribute descriptions.\",\n )\n ],\n ),\n )\n cls.mocked_proposal = Description(\n {\n \"contract_address\": \"some_contract_address\",\n \"token_id\": \"123456\",\n \"trade_nonce\": \"876438756348568\",\n \"from_supply\": \"543\",\n \"to_supply\": \"432\",\n \"value\": \"67\",\n }\n )\n\n # list of messages\n cls.list_of_http_messages = (\n DialogueMessage(\n HttpMessage.Performative.REQUEST,\n {\n \"method\": cls.mocked_method,\n \"url\": cls.mocked_url,\n \"headers\": cls.mocked_headers,\n \"version\": cls.mocked_version,\n \"body\": cls.mocked_body_bytes,\n },\n is_incoming=False,\n ),\n )\n\n cls.list_of_oef_search_messages = (\n DialogueMessage(\n OefSearchMessage.Performative.SEARCH_SERVICES,\n {\"query\": cls.mocked_query},\n ),\n )",
"def generate():\n strategies = []\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy4))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # Add Iocaine Powder strategy\n strategies.extend(\n generate_meta_strategy_pair(IocanePowderStrategy))\n\n # Add are you a lucker strategies\n strategies.extend(\n generate_meta_strategy_pair(AreYouALuckerStrategy))\n\n # Add RPS Meta Fix strategies\n strategies.extend(\n generate_meta_strategy_pair(RPSMetaFixStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations",
"def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n for limit in limits:\n for source in sources:\n strategies.extend(\n generate_meta_strategy_pair(\n RFindStrategy,\n limit=limit,\n src=source,\n shenanigans=False,\n ))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n do_rotations = [True for _ in strategies]\n return strategies, do_rotations",
"def setUp(self):\n test_helpers.patch_environ(self)\n\n data = []\n\n strategy1 = data_types.FuzzStrategyProbability()\n strategy1.strategy_name = 'fork,corpus_subset,'\n strategy1.probability = 0.33\n strategy1.engine = 'libFuzzer'\n data.append(strategy1)\n\n strategy2 = data_types.FuzzStrategyProbability()\n strategy2.strategy_name = 'random_max_len,value_profile,'\n strategy2.probability = 0.34\n strategy2.engine = 'libFuzzer'\n data.append(strategy2)\n ndb.put_multi(data)\n\n distribution = fuzz_task.get_strategy_distribution_from_ndb()\n\n environment.set_value('USE_BANDIT_STRATEGY_SELECTION', True)\n environment.set_value('STRATEGY_SELECTION_DISTRIBUTION', distribution)",
"def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n for limit in limits:\n for source in sources:\n strategies.extend(\n generate_meta_strategy_pair(\n RFindStrategy,\n limit=limit,\n src=source,\n shenanigans=False,\n ))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n do_rotations = [True for _ in strategies]\n return strategies, do_rotations",
"def _get_strategies(self) -> Dict[str, str]:\n strategies = [method for method in dir(self) if STRATEGY_IDENTIFIER in method]\n\n if not strategies:\n logger.warning(\n \"There are no strategy provided. \"\n \"Make sure the implemented strategy methods \"\n \"start contain the '%s' term.\" % STRATEGY_IDENTIFIER\n )\n return {str(n_method): method for n_method, method in enumerate(strategies)}",
"def generate():\n strategies = []\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy4))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # Add Iocaine Powder strategy\n strategies.extend(\n generate_meta_strategy_pair(IocanePowderStrategy))\n\n # Add are you a lucker strategies\n strategies.extend(\n generate_meta_strategy_pair(AreYouALuckerStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations",
"def InitStrategy(self, sname, strategy):\n\n self._string = sname\n\n self.strategy = strategy\n self.postracker = position.PositionTracker(self.strategy)",
"def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n for limit in limits:\n for source in sources:\n strategies.extend(\n generate_meta_strategy_pair(\n RFindStrategy,\n limit=limit,\n src=source,\n shenanigans=False,\n ))\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # Add Iocaine Powder strategy\n strategies.extend(\n generate_meta_strategy_pair(IocanePowderStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations",
"def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n for limit in limits:\n for source in sources:\n strategies.extend(\n generate_meta_strategy_pair(\n RFindStrategy,\n limit=limit,\n src=source,\n shenanigans=False,\n ))\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # Add Iocaine Powder strategy\n strategies.extend(\n generate_meta_strategy_pair(IocanePowderStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations",
"def setup(cls):\n super().setup()\n cls.search_behaviour = cast(\n GenericSearchBehaviour, cls._skill.skill_context.behaviours.search\n )\n cls.tx_behaviour = cast(\n GenericTransactionBehaviour, cls._skill.skill_context.behaviours.transaction\n )\n cls.strategy = cast(GenericStrategy, cls._skill.skill_context.strategy)\n\n cls.logger = cls._skill.skill_context.logger",
"def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n strategies.extend(\n generate_meta_strategy_pair(\n WrappedRFindStrategy,\n limits=limits,\n sources=sources,\n shenanigans=False))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add memory pattern strategies\n strategies.extend(\n generate_meta_strategy_pair(MemoryPatternsV7Strategy))\n\n do_rotations = [True for _ in strategies]\n return strategies, do_rotations",
"def generate():\n strategies = []\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # Add Iocaine Powder strategy\n strategies.extend(\n generate_meta_strategy_pair(IocanePowderStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations",
"def generate():\n strategies = []\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations",
"def generate():\n strategies = []\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy4))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # Add Iocaine Powder strategy\n strategies.extend(\n generate_meta_strategy_pair(IocanePowderStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations",
"def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n for limit in limits:\n for source in sources:\n strategies.extend(\n generate_meta_strategy_pair(\n RFindStrategy,\n limit=limit,\n src=source,\n shenanigans=False,\n ))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(\n GeometryV4Strategy))\n\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations",
"def generate():\n strategies = []\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations",
"def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n for limit in limits:\n for source in sources:\n strategies.extend(\n generate_meta_strategy_pair(\n RFindStrategy,\n limit=limit,\n src=source,\n shenanigans=False,\n ))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations",
"def test_strategies(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.options.auto_fence = True\n self.supervisor.supvisors.options.conciliation_strategy = 1\n self.supervisor.supvisors.options.starting_strategy = 2\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n self.assertDictEqual({'auto-fencing': True, 'starting': 'MOST_LOADED',\n 'conciliation': 'INFANTICIDE'}, rpc.get_strategies())",
"def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n\n strategies.extend([\n generate_meta_strategy_pair(\n WrappedRFindStrategy,\n limits=limits,\n sources=sources,\n shenanigans=False)[0]])\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations",
"def generate():\n strategies = []\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations"
] |
[
"0.6532167",
"0.5933181",
"0.55383205",
"0.5475055",
"0.5461148",
"0.54593635",
"0.54482967",
"0.5417319",
"0.5391583",
"0.53656155",
"0.53485143",
"0.5343126",
"0.5342159",
"0.5331955",
"0.5308945",
"0.53017586",
"0.5301568",
"0.5298491",
"0.5298491",
"0.5286031",
"0.526086",
"0.5240757",
"0.5238277",
"0.52226776",
"0.5219429",
"0.521794",
"0.5214315",
"0.52093565",
"0.51991594",
"0.5181124"
] |
0.60849404
|
1
|
This is an ADMM solver for the (Latent variable) Single Graphical Lasso problem (SGL). If ``latent=False``, this function solves
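The docstring is truncated here; for reference, the problem an ADMM routine of this form conventionally solves is the graphical lasso with an off-diagonal l1 penalty, plus a nuclear-norm term on the low-rank component when latent=True (notation inferred from the code below):

% latent = False (single graphical lasso):
\min_{\Theta \succ 0}\; -\log\det(\Theta) + \langle S, \Theta\rangle + \lambda_1 \lVert \Theta \rVert_{1,\mathrm{od}}

% latent = True (latent variable graphical lasso):
\min_{\Theta, L}\; -\log\det(\Theta - L) + \langle S, \Theta - L\rangle
  + \lambda_1 \lVert \Theta \rVert_{1,\mathrm{od}} + \mu_1 \lVert L \rVert_{\ast}
  \quad \text{s.t. } \Theta - L \succ 0,\; L \succeq 0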
|
def ADMM_SGL(S, lambda1, Omega_0, Theta_0=np.array([]), X_0=np.array([]),
             rho=1., max_iter=1000, tol=1e-7, rtol=1e-4, stopping_criterion='boyd',
             update_rho=True, verbose=False, measure=False, latent=False, mu1=None):
    assert Omega_0.shape == S.shape
    assert S.shape[0] == S.shape[1]
    assert lambda1 > 0
    assert stopping_criterion in ["boyd", "kkt"]

    if latent:
        assert mu1 is not None
        assert mu1 > 0

    (p, p) = S.shape

    assert rho > 0, "ADMM penalization parameter must be positive."

    # initialize
    Omega_t = Omega_0.copy()
    if len(Theta_0) == 0:
        Theta_0 = Omega_0.copy()
    if len(X_0) == 0:
        X_0 = np.zeros((p, p))

    Theta_t = Theta_0.copy()
    L_t = np.zeros((p, p))
    X_t = X_0.copy()

    runtime = np.zeros(max_iter)
    residual = np.zeros(max_iter)
    status = ''

    if verbose:
        print("------------ADMM Algorithm for Single Graphical Lasso----------------")

        if stopping_criterion == 'boyd':
            hdr_fmt = "%4s\t%10s\t%10s\t%10s\t%10s"
            out_fmt = "%4d\t%10.4g\t%10.4g\t%10.4g\t%10.4g"
            print(hdr_fmt % ("iter", "r_t", "s_t", "eps_pri", "eps_dual"))
        elif stopping_criterion == 'kkt':
            hdr_fmt = "%4s\t%10s"
            out_fmt = "%4d\t%10.4g"
            print(hdr_fmt % ("iter", "kkt residual"))

    ##################################################################
    ### MAIN LOOP STARTS
    ##################################################################
    for iter_t in np.arange(max_iter):
        if measure:
            start = time.time()

        # Omega Update
        W_t = Theta_t - L_t - X_t - (1 / rho) * S
        eigD, eigQ = np.linalg.eigh(W_t)
        Omega_t_1 = Omega_t.copy()
        Omega_t = phiplus(beta=1 / rho, D=eigD, Q=eigQ)

        # Theta Update
        Theta_t = prox_od_1norm(Omega_t + L_t + X_t, (1 / rho) * lambda1)

        # L Update
        if latent:
            C_t = Theta_t - X_t - Omega_t
            # C_t = (C_t.T + C_t)/2
            eigD1, eigQ1 = np.linalg.eigh(C_t)
            L_t = prox_rank_norm(C_t, mu1 / rho, D=eigD1, Q=eigQ1)

        # X Update
        X_t = X_t + Omega_t - Theta_t + L_t

        if measure:
            end = time.time()
            runtime[iter_t] = end - start

        # Stopping criterion
        if stopping_criterion == 'boyd':
            r_t, s_t, e_pri, e_dual = ADMM_stopping_criterion(Omega_t, Omega_t_1, Theta_t, L_t, X_t,
                                                              S, rho, tol, rtol, latent)

            # update rho
            if update_rho:
                if r_t >= 10 * s_t:
                    rho_new = 2 * rho
                elif s_t >= 10 * r_t:
                    rho_new = 0.5 * rho
                else:
                    rho_new = 1. * rho

                # rescale dual variables
                X_t = (rho / rho_new) * X_t
                rho = rho_new

            residual[iter_t] = max(r_t, s_t)

            if verbose:
                print(out_fmt % (iter_t, r_t, s_t, e_pri, e_dual))

            if (r_t <= e_pri) and (s_t <= e_dual):
                status = 'optimal'
                break

        elif stopping_criterion == 'kkt':
            eta_A = kkt_stopping_criterion(Omega_t, Theta_t, L_t, rho * X_t, S, lambda1, latent, mu1)
            residual[iter_t] = eta_A

            if verbose:
                print(out_fmt % (iter_t, eta_A))

            if eta_A <= tol:
                status = 'optimal'
                break

    ##################################################################
    ### MAIN LOOP FINISHED
    ##################################################################

    # retrieve status (partially optimal or max iter)
    if status != 'optimal':
        if stopping_criterion == 'boyd':
            if (r_t <= e_pri):
                status = 'primal optimal'
            elif (s_t <= e_dual):
                status = 'dual optimal'
            else:
                status = 'max iterations reached'
        else:
            status = 'max iterations reached'

    print(f"ADMM terminated after {iter_t+1} iterations with status: {status}.")

    ### CHECK FOR SYMMETRY
    if abs((Omega_t).T - Omega_t).max() > 1e-5:
        warnings.warn(f"Omega variable is not symmetric, largest deviation is {abs((Omega_t).T - Omega_t).max()}.")
    if abs((Theta_t).T - Theta_t).max() > 1e-5:
        warnings.warn(f"Theta variable is not symmetric, largest deviation is {abs((Theta_t).T - Theta_t).max()}.")
    if abs((L_t).T - L_t).max() > 1e-5:
        warnings.warn(f"L variable is not symmetric, largest deviation is {abs((L_t).T - L_t).max()}.")

    ### CHECK FOR POSDEF
    D = np.linalg.eigvalsh(Theta_t - L_t)
    if D.min() <= 0:
        print(
            f"WARNING: Theta (Theta - L resp.) is not positive definite. Solve to higher accuracy! (min EV is {D.min()})")

    if latent:
        D = np.linalg.eigvalsh(L_t)
        if D.min() < -1e-8:
            print(f"WARNING: L is not positive semidefinite. Solve to higher accuracy! (min EV is {D.min()})")

    if latent:
        sol = {'Omega': Omega_t, 'Theta': Theta_t, 'L': L_t, 'X': X_t}
    else:
        sol = {'Omega': Omega_t, 'Theta': Theta_t, 'X': X_t}

    if measure:
        info = {'status': status, 'runtime': runtime[:iter_t+1], 'residual': residual[:iter_t+1]}
else:
info = {'status': status}
return sol, info
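A minimal usage sketch for the solver above. The data, dimensions, and parameter values are illustrative assumptions; ADMM_SGL and its numpy dependency are assumed to be in scope exactly as defined in the snippet above.

import numpy as np

# Hypothetical data: empirical covariance of 200 samples in 10 dimensions.
rng = np.random.default_rng(0)
X = rng.standard_normal((200, 10))
S = np.cov(X, rowvar=False)

# Identity as a simple (assumed) starting point for Omega_0.
sol, info = ADMM_SGL(S, lambda1=0.1, Omega_0=np.eye(S.shape[0]))
Theta = sol['Theta']  # sparse precision-matrix estimate
print(info['status'], np.count_nonzero(np.abs(Theta) > 1e-8))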
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def solver_mll(X, y, C, S, alpha=0.1, max_iter=1000, tol=1e-4, positive=False):\n n_tasks, n_samples, n_features = X.shape\n lasso = Lasso(alpha=alpha, fit_intercept=False, positive=positive)\n lasso_p = Lasso(alpha=alpha / n_tasks, fit_intercept=False,\n positive=True)\n old_theta = C[:, None] * S\n\n for i in range(max_iter):\n W = X * C[None, None, :]\n for k in range(n_tasks):\n lasso.fit(W[k], y[k])\n S[:, k] = lasso.coef_\n Z = S.T[:, None, :] * X\n Z = Z.reshape(n_tasks * n_samples, n_features)\n lasso_p.fit(Z, y.flatten())\n C = lasso_p.coef_\n theta = C[:, None] * S\n dll = abs(theta - old_theta).max()\n dll /= max(abs(theta).max(), abs(old_theta).max(), 1.)\n old_theta = theta.copy()\n\n if dll < tol:\n break\n\n if i == max_iter - 1:\n warnings.warn('Objective did not converge.' +\n ' You might want' +\n ' to increase the number of iterations.' +\n ' Fitting data with very small alpha' +\n ' may cause precision problems.',\n ConvergenceWarning)\n return C, S, i",
"def LinearModel(G,x=0,i0=0.1,L1='L',D=-0.01,tf=5,Nt=1000):\r\n #set up graph atteributes\r\n N = G.number_of_nodes()\r\n degree_arr=np.asarray(G.degree(),dtype=int)[:,1]\r\n iarray = np.zeros((Nt+1,N))\r\n tarray = np.linspace(0,tf,Nt+1)\r\n #calucalte operaters and set intial conditions\r\n A=nx.adjacency_matrix(G)\r\n L=-D*(scipy.sparse.diags(degree_arr)-A)\r\n Ls=-D*(scipy.sparse.diags(np.ones(N))-scipy.sparse.diags(1/degree_arr).dot(A))\r\n\r\n y0=np.zeros(N)\r\n y0[x]=i0\r\n #set up operators\r\n\r\n if L1=='Ls':\r\n L=Ls\r\n elif L1=='Lst':\r\n L=Ls.transpose()\r\n\r\n #define operators\r\n def Lap(y,t):\r\n return scipy.sparse.csr_matrix.__mul__(L,y)\r\n\r\n iarray[:,:]=scipy.integrate.odeint(Lap,y0,tarray)\r\n\r\n return iarray",
"def __solve_full_linear_problem(self):\n samples = []\n\n for news in self.news_pool:\n samples += [news.sampled_quality] * self.layout_slots\n\n self.full_C = np.array(samples) * self.full_lambdas\n\n linear_problem = opt.linprog(A_ub=self.full_A, b_ub=self.full_B, c=self.full_C)\n slots_assegnation_probabilities = []\n slot_counter = 0\n tmp_slot_probabilities = []\n while slot_counter < self.layout_slots:\n i = slot_counter\n while i < len(linear_problem.x):\n tmp_slot_probabilities.append(np.abs(linear_problem.x[i]))\n i += self.layout_slots\n slots_assegnation_probabilities.append(tmp_slot_probabilities.copy())\n tmp_slot_probabilities.clear()\n slot_counter += 1\n\n result = self.__de_randomize_LP(self.news_pool, slots_assegnation_probabilities, self.lp_rand_tech)\n\n return result",
"def fit(self):\n import networkx as nx\n import torch\n # Step 1. Calculate the Laplacian matrix\n L = nx.laplacian_matrix(self.Graph)\n nodelist = self.Graph.nodes()\n K = L.shape[0]\n\n # Step 2. Get the data in the right format \n cache = self.loss_function(self.data_train)\n \n # Step 3. Compute the proximal loss\n def proximal_loss(t, nu, warm_start, pool, cache=cache):\n XtX = cache['XtX']\n XtY = cache['XtY']\n n = cache['n']\n # LU = X'X + 0.5 * t * I\n Alu = torch.lu(XtX + 1./(2 * t) * torch.eye(n).unsqueeze(0).double())\n b = XtY + 1./(2 * t) * torch.from_numpy(nu)\n x = torch.lu_solve(b, *Alu).numpy()\n return x\n\n def proximal_residual(t, nu, warm_start, pool, lambda_val=1e-4):\n return nu / (1. + t * lambda_val)\n\n G_to_data = self._graph_to_data(cache['alpha_shape'])\n result, info = self._stratified_model_admm(shape=cache['shape'], \\\n Lap=L, \\\n loss_proximal_func=proximal_loss, \\\n regulariser_proximal_func=proximal_residual, \\\n graph_data=G_to_data)\n print(info)\n return self._output_to_graph(result)",
"def latent_grad(mu_S, kern, Z, dL_dpsi0, dL_dpsi1, dL_dpsi2):\r\n mu, log_S = mu_S.reshape(2, 1, -1)\r\n S = np.exp(log_S)\r\n\r\n mu0, S0 = kern.dpsi0_dmuS(dL_dpsi0, Z, mu, S)\r\n mu1, S1 = kern.dpsi1_dmuS(dL_dpsi1, Z, mu, S)\r\n mu2, S2 = kern.dpsi2_dmuS(dL_dpsi2, Z, mu, S)\r\n\r\n dmu = mu0 + mu1 + mu2 - mu\r\n # dS = S0 + S1 + S2 -0.5 + .5/S\r\n dlnS = S * (S0 + S1 + S2 - 0.5) + .5\r\n\r\n return -np.hstack((dmu.flatten(), dlnS.flatten()))",
"def block_SGL(S, lambda1, Omega_0, Theta_0=None, X_0=None, rho=1., max_iter=1000, \n tol=1e-7, rtol=1e-3, stopping_criterion=\"boyd\",\n update_rho=True, verbose=False, measure=False):\n assert Omega_0.shape == S.shape\n assert S.shape[0] == S.shape[1]\n assert lambda1 > 0\n\n (p, p) = S.shape\n\n if Theta_0 is None:\n Theta_0 = Omega_0.copy()\n if X_0 is None:\n X_0 = np.zeros((p, p))\n\n # compute connected components of S with lambda_1 threshold\n numC, allC = get_connected_components(S, lambda1)\n\n allOmega = list()\n allTheta = list()\n allX = list()\n\n for i in range(numC):\n C = allC[i]\n\n # single node connected components have a closed form solution, see Witten, Friedman, Simon \"NEW INSIGHTS FOR THE GRAPHICAL LASSO \"\n if len(C) == 1:\n # we use the OFF-DIAGONAL l1-penalty, otherwise it would be 1/(S[C,C]+lambda1)\n closed_sol = 1 / (S[C, C])\n\n allOmega.append(closed_sol)\n allTheta.append(closed_sol)\n allX.append(np.array([0]))\n\n\n # else solve Graphical Lasso for the corresponding block\n else:\n block_S = S[np.ix_(C, C)]\n block_sol, block_info = ADMM_SGL(S=block_S, lambda1=lambda1, Omega_0=Omega_0[np.ix_(C, C)],\n Theta_0=Theta_0[np.ix_(C, C)], X_0=X_0[np.ix_(C, C)], tol=tol, rtol=rtol,\n stopping_criterion=stopping_criterion, update_rho=update_rho,\n rho=rho, max_iter=max_iter, verbose=verbose, measure=measure)\n\n allOmega.append(block_sol['Omega'])\n allTheta.append(block_sol['Theta'])\n allX.append(block_sol['X'])\n\n # compute inverse permutation\n per = np.hstack(allC)\n per1 = invert_permutation(per)\n\n # construct solution by applying inverse permutation indexing\n sol = dict()\n sol['Omega'] = block_diag(*allOmega)[np.ix_(per1, per1)]\n sol['Theta'] = block_diag(*allTheta)[np.ix_(per1, per1)]\n sol['X'] = block_diag(*allX)[np.ix_(per1, per1)]\n\n return sol",
"def set_DirichletSS_sparse(self):\n \n \n self.set_Dirichlet_vessel(self.inlet)\n\n\n self.tissue_consumption(self.Mt)\n \n #REINITIALISATION OF THE VECTOR OF TISSUE PHI!!!\n self.phi_t=np.zeros(len(self.phit))\n \n self.set_Dirichlet_north(0)\n self.set_Dirichlet_east(0)\n self.set_Dirichlet_west(0)\n \n self.A.eliminate_zeros()",
"def solver_mll(X, y, alpha=0.1, C=None, S=None, callback=None, positive=False,\n maxiter=1000, tol=1e-4, compute_obj=False):\n n_tasks, n_samples, n_features = X.shape\n lasso = Lasso(alpha=alpha, fit_intercept=False,\n positive=positive)\n lasso_p = Lasso(alpha=alpha / n_tasks, fit_intercept=False,\n positive=True)\n if S is None:\n S = np.zeros((n_features, n_tasks))\n if C is None:\n C = np.ones(n_features)\n else:\n if C.max() <= 0:\n C = np.ones(n_features)\n\n old_theta = C[:, None] * S\n objs = []\n if compute_obj or callback:\n ll = objective(X, y, C, S, alpha)\n objs.append(ll)\n for i in range(maxiter):\n # W = block_diag(X * C[None, None, :], \"csc\")\n # lasso.fit(W, y.flatten())\n # S = lasso.coef_.reshape(n_tasks, n_features).T\n W = X * C[None, None, :]\n for k in range(n_tasks):\n lasso.fit(W[k], y[k])\n S[:, k] = lasso.coef_\n Z = S.T[:, None, :] * X\n Z = Z.reshape(n_tasks * n_samples, n_features)\n lasso_p.fit(Z, y.flatten())\n C = lasso_p.coef_\n theta = C[:, None] * S\n dll = abs(theta - old_theta).max()\n dll /= max(theta.max(), old_theta.max(), 1.)\n old_theta = theta.copy()\n if compute_obj or callback:\n ll = objective(X, y, C, S, alpha)\n objs.append(ll)\n if callback:\n callback(theta, obj=ll)\n if dll < tol:\n break\n\n if i == maxiter - 1:\n print(\"**************************************\\n\"\n \"******** WARNING: Stopped early. *****\\n\"\n \"\\n\"\n \"You may want to increase maxiter. Last err: %f\" % dll)\n return C, S, objs",
"def fit(self, adjacency: Union[sparse.csr_matrix, np.ndarray]) -> 'LaplacianEmbedding':\n adjacency = check_format(adjacency).asfptype()\n check_square(adjacency)\n check_symmetry(adjacency)\n n = adjacency.shape[0]\n\n regularize: bool = not (self.regularization is None or self.regularization == 0.)\n check_scaling(self.scaling, adjacency, regularize)\n\n if regularize:\n solver: EigSolver = LanczosEig()\n else:\n solver = set_solver(self.solver, adjacency)\n n_components = 1 + check_n_components(self.n_components, n-2)\n\n weights = adjacency.dot(np.ones(n))\n regularization = self.regularization\n if regularization:\n if self.relative_regularization:\n regularization = regularization * weights.sum() / n ** 2\n weights += regularization * n\n laplacian = LaplacianOperator(adjacency, regularization)\n else:\n weight_diag = sparse.diags(weights, format='csr')\n laplacian = weight_diag - adjacency\n\n solver.which = 'SM'\n solver.fit(matrix=laplacian, n_components=n_components)\n eigenvalues = solver.eigenvalues_[1:]\n eigenvectors = solver.eigenvectors_[:, 1:]\n\n embedding = eigenvectors.copy()\n\n if self.scaling:\n eigenvalues_inv_diag = diag_pinv(eigenvalues ** self.scaling)\n embedding = eigenvalues_inv_diag.dot(embedding.T).T\n\n if self.normalized:\n embedding = normalize(embedding, p=2)\n\n self.embedding_ = embedding\n self.eigenvalues_ = eigenvalues\n self.eigenvectors_ = eigenvectors\n self.regularization_ = regularization\n\n return self",
"def solver(u_init, eta_0, eta, eta_lin, T, H, L_lhs, L_rhs, alpha, gamma, B, D, C, ftol = 1e-3, max_iter = 5000, verbose = 0, nnls_max_iter=30):\n\n # Raise('NotImplementedError: only adjusted the arguments.')\n #Need to incorporate L_lhs into stacked and appropriate w_lin updates, u_update and eta_lin increments\n #precompute the expensive operation:\n lin_penalties = 1/np.sqrt(2*eta_lin)\n eta_T_H_L_stacked = scipy.sparse.vstack([T.multiply(1/np.sqrt(2*eta_0))] + [H[i].multiply(1/np.sqrt(2*eta[i])) for i in range(len(H))] + [L_lhs.multiply(lin_penalties[:,None])])\n #!!!!\n# premultiplied_lhs = eta_T_H_stacked.T.dot(eta_T_H_stacked).toarray()\n #!!!!\n u_prev = u_init + 1\n u = u_init\n count = 0\n obj_history = []\n relaxed_obj_history = [-1, 0.1] #just two initial values to enter the loop\n while np.abs((relaxed_obj_history[-2] - relaxed_obj_history[-1])/relaxed_obj_history[-2]) > ftol and count < max_iter:#np.linalg.norm(u - u_prev, np.inf) > 1e-3 and count < max_iter: #Maybe all of them stop changing\n start = time.time()\n \n u_prev = np.copy(u)\n w_0 = w_0_update(eta_0, u, T, alpha, B) \n w = w_update(u, H, gamma, D, C) \n w_lin = w_lin_update(u, L_lhs, L_rhs)\n# u = u_update(eta_0, eta, w_0, w, eta_T_H_stacked, nnls_max_iter=50)\n #!!!!\n # u = u_update(eta_0, eta, w_0, w, eta_T_H_L_stacked, nnls_max_iter=30)\n u = u_update(eta_0, eta, eta_lin, w_0, w, w_lin, eta_T_H_L_stacked, premultiplied_lhs = None, nnls_max_iter=nnls_max_iter)\n #!!!!\n count += 1 \n if count == 10:\n u_inf = np.copy(u)\n w_0_inf = w_0[:]\n w_inf = w[:]\n w_lin_inf = w_lin[:]\n if count > 10 and np.abs(cur_obj) > 1e+15: #HANDLE THIS BETTER!!!\n print('INFINITY! RETURNING u at the 10-th iteration to enter the feasibility loop')\n return u_inf, w_0_inf, w_inf, w_lin_inf, obj_history, relaxed_obj_history\n \n cur_obj = obj_u_opt_N_fixed(u, T, alpha, B)\n obj_history.append(cur_obj)\n cur_relaxed_obj = relaxed_obj_u_opt_N_fixed(u, w_0, w, w_lin, eta_0, eta, eta_lin, T, H, L_lhs, alpha, B)\n # relaxed_obj_u_opt_N_fixed(u, w_0, w, eta_0, eta, T, H, alpha, B)\n relaxed_obj_history.append(cur_relaxed_obj) \n \n stop = time.time()\n duration = stop-start\n \n if count%1 == 0 and verbose: \n stopping_criterion = np.abs((relaxed_obj_history[-2] - relaxed_obj_history[-1])/relaxed_obj_history[-2])\n print(' iter = {}, stopping criterion:{}, OBJ {}'.format(count, stopping_criterion, cur_obj))\n print(' This iteration took: {}'.format(duration))\n return u, w_0, w, w_lin, obj_history, relaxed_obj_history",
"def solveLSM(self):\n ierr = c_int(1)\n self.fteik2d.fteik_solver2d_solveLSM(ierr)\n if (ierr.value != 0):\n print(\"Error solving eikonal equation\")\n return -1\n return 0\n #errorAll = 0\n #for i in range(self.nsrc):\n # isrc = i + 1\n # self.fteik2d.fteik_solver2d_solveSourceLSM(isrc, ierr)\n # if (ierr.value != 0):\n # print(\"Failed to solve for source %d\"%i+1)\n # errorAll = errorAll + 1\n #return errorAll",
"def latent_cost_and_grad(mu_S, kern, Z, dL_dpsi0, dL_dpsi1, dL_dpsi2):\r\n mu, log_S = mu_S.reshape(2, 1, -1)\r\n S = np.exp(log_S)\r\n\r\n psi0 = kern.psi0(Z, mu, S)\r\n psi1 = kern.psi1(Z, mu, S)\r\n psi2 = kern.psi2(Z, mu, S)\r\n\r\n lik = dL_dpsi0 * psi0 + np.dot(dL_dpsi1.flatten(), psi1.flatten()) + np.dot(dL_dpsi2.flatten(), psi2.flatten()) - 0.5 * np.sum(np.square(mu) + S) + 0.5 * np.sum(log_S)\r\n\r\n mu0, S0 = kern.dpsi0_dmuS(dL_dpsi0, Z, mu, S)\r\n mu1, S1 = kern.dpsi1_dmuS(dL_dpsi1, Z, mu, S)\r\n mu2, S2 = kern.dpsi2_dmuS(dL_dpsi2, Z, mu, S)\r\n\r\n dmu = mu0 + mu1 + mu2 - mu\r\n # dS = S0 + S1 + S2 -0.5 + .5/S\r\n dlnS = S * (S0 + S1 + S2 - 0.5) + .5\r\n return -lik, -np.hstack((dmu.flatten(), dlnS.flatten()))",
"def Lasso(X0, Y, lam, w=np.array([0]), maxit=100, normalize=2):\n\n # Obtain size of X\n n, d = X0.shape\n X = np.zeros((n, d), dtype=np.complex64)\n Y = Y.reshape(n, 1)\n\n # Create w if none is given\n if w.size != d:\n w = np.zeros((d, 1), dtype=np.complex64)\n w_old = np.zeros((d, 1), dtype=np.complex64)\n\n # First normalize data\n if normalize != 0:\n Mreg = np.zeros((d, 1))\n for i in range(0, d):\n Mreg[i] = 1.0 / (np.linalg.norm(X0[:, i], normalize))\n X[:, i] = Mreg[i] * X0[:, i]\n else:\n X = X0\n\n # Lipschitz constant of gradient of smooth part of loss function\n L = np.linalg.norm(X.T.dot(X), 2)\n\n # Now loop until converged or max iterations\n for iters in range(0, maxit):\n\n # Update w\n z = w + iters / float(iters + 1) * (w - w_old)\n w_old = w\n z = z - X.T.dot(X.dot(z) - Y) / L\n for j in range(d):\n w[j] = np.multiply(np.sign(z[j]), np.max([abs(z[j]) - lam / L, 0]))\n\n # Could put in some sort of break condition based on convergence here.\n\n # Now that we have the sparsity pattern, used least squares.\n biginds = np.where(w != 0)[0]\n if biginds != []: w[biginds] = np.linalg.lstsq(X[:, biginds], Y)[0]\n\n # Finally, reverse the regularization so as to be able to use with raw data\n if normalize != 0:\n return np.multiply(Mreg, w)\n else:\n return w",
"def anl_solution(self):\r\n\r\n m = float(self.mass) / self.nu_m\r\n qe = 1 / self.nu_m * (self.nu_t * self.nu_t / self.nu_x) * 1.0 \\\r\n / float(self.size_tick * self.size_tick)\r\n print 'qE=', qe\r\n c = self.light_vel\r\n for i in range(0, len(self.obs.obt_g)):\r\n ddt = float(self.obs.obt[i] - self.obs.obt[i - 1])\r\n x = m * c ** 2 / qe * (math.sqrt(1.0 + (qe * self.t[i] / (m\r\n * c)) ** 2) - 1.0)\r\n self.xa_track.append(x)\r\n p = qe * self.t[i]\r\n self.pa.append(p)\r\n v = p / math.sqrt(m ** 2 + (p / c) ** 2)\r\n jv = self.t[i] * qe / (m * c)\r\n v = math.sqrt(jv * jv / (1 + jv * jv)) * c\r\n self.va.append(v)\r\n print 'Analytical solution of the differential equation of motion'",
"def write_ldl_lsolve(f, variables):\n\n data = variables['data']\n priv = variables['priv']\n Lp = priv['L']['p']\n\n f.write(\"void LDL_lsolve(LDL_int n, c_float X [ ], LDL_int Lp [ ]\")\n f.write(\", LDL_int Li [ ], c_float Lx [ ]){\\n\")\n f.write(\"LDL_int p;\\n\")\n\n # Unroll for loop\n for j in range(data['m'] + data['n']):\n if Lp[j+1] > Lp[j]: # Write loop ONLY if necessary\n f.write(\"for (p = %i ; p < %i ; p++){\\n\" % (Lp[j], Lp[j+1]))\n f.write(\"X [Li [p]] -= Lx [p] * X [%i];\\n\" % (j))\n f.write(\"}\\n\")\n\n # Close function\n f.write(\"}\\n\\n\")",
"def dLdp(C1s,C0s,ks,bs,sigma=1):\n # return np.array(jit(jacfwd(L,argnums=1))(q,ps,C1s,C0s,ks,bs,sigma))\n \n # A = FIM(q,ps,C1s,C0s,ks,bs,sigma)\n \n # Construct A(q,ps)\n A = FIM(C1s,C0s,ks,bs,sigma)\n\n # Construct dAdp(q,ps)\n dAdp = jit(jacfwd(A,argnums=1))\n \n # Construct inv_A(q,ps)\n inv_A=lambda q,ps: jnp.linalg.inv(A(q,ps))\n \n # print(np.trace(-dAinv(inv_A,dAdp),axis1=0,axis2=1)-np.array(jit(jacfwd(L,argnums=1))(q,ps,C1s,C0s,ks,bs,sigma)))\n \n # Construct dLdP(q,ps)\n\n\n\n return lambda q,ps: -np.array(jnp.trace(dAinv(inv_A(q,ps),dAdp(q,ps)),axis1=0,axis2=1))",
"def test_sdp(self):\n a = sp.rand(100, 100, .1, random_state=1)\n a = a.todense()\n X = Variable(100, 100)\n obj = at.norm(X, \"nuc\") + at.norm(X-a, 'fro')\n p = Problem(Minimize(obj))\n p.solve(solver=\"SCS\")",
"def case_spd_linsys(\n spd_matrix: Union[np.ndarray, scipy.sparse.spmatrix, linops.LinearOperator],\n rng: np.random.Generator,\n) -> problems.LinearSystem:\n return random_linear_system(rng=rng, matrix=spd_matrix)",
"def fit(self, X):\n self._causal_order = None\n self._adjacency_matrices = None\n\n X = check_array(X)\n\n lingam_model = self._lingam_model\n if lingam_model is None:\n lingam_model = DirectLiNGAM()\n elif not isinstance(lingam_model, _BaseLiNGAM):\n raise ValueError(\"lingam_model must be a subclass of _BaseLiNGAM\")\n\n phis = self._ar_coefs\n thetas = self._ma_coefs\n order = self._order\n\n if phis is None or thetas is None:\n phis, thetas, order, residuals = self._estimate_varma_coefs(X)\n else:\n p = phis.shape[0]\n q = thetas.shape[0]\n residuals = self._calc_residuals(X, phis, thetas, p, q)\n\n model = lingam_model\n model.fit(residuals)\n\n psis, omegas = self._calc_psi_and_omega(\n model.adjacency_matrix_, phis, thetas, order\n )\n\n if self._prune:\n ee = np.dot(\n np.eye(model.adjacency_matrix_.shape[0]) - model.adjacency_matrix_,\n residuals.T,\n ).T\n psis, omegas = self._pruning(X, ee, order, model.causal_order_)\n\n self._ar_coefs = phis\n self._ma_coefs = thetas\n self._order = order\n self._residuals = residuals\n\n self._causal_order = model.causal_order_\n self._adjacency_matrices = (psis, omegas)\n\n return self",
"def experiment_linear_l1(_):\n # Attack epsilon is manually set according to the norm of the min-norm\n # solution found using cvxpy for d/n=10. That is max-margin=1/min-norm.\n # Min linf-norm solution found (norm=0.0422)\n # Min l2-norm solution found (norm=0.3411)\n # Min l1-norm solution found (norm=1.8497)\n # Min l4-norm solution found (norm=0.0002)\n # Min l1.5-norm solution found (norm=0.5274)\n return experiment_linear_lp(\n adv_norm_type='l1',\n dual_norm_type='linf',\n baseline_norm_types=['l2'],\n attack_step_dir='grad_max')",
"def optimize_linear(grads, eps, ordr):\n\n red_ind = list(range(1, len(grads.size())))\n azdiv = torch.tensor(1e-12, dtype=grads.dtype, device=grads.device)\n\n if ordr == np.inf:\n opt_pert = torch.sign(grads)\n\n elif ordr == 1:\n abs_grad = torch.abs(grads)\n sign = torch.sign(grads)\n ori_shape = [1] * len(grads.size())\n ori_shape[0] = grads.size(0)\n\n max_abs_grad, _ = torch.max(abs_grad.view(grads.size(0), -1), 1)\n max_mask = abs_grad.eq(max_abs_grad.view(ori_shape)).float()\n num_ties = max_mask\n for red_scalar in red_ind:\n num_ties = torch.sum(num_ties, red_scalar, keepdims=True)\n opt_pert = sign * max_mask / num_ties\n # TODO tests\n\n elif ordr == 2:\n # TODO\n square = torch.max(azdiv, torch.sum(grads ** 2, red_ind, keepdim=True))\n opt_pert = grads / torch.sqrt(square)\n # TODO tests\n else:\n raise NotImplementedError('Only L-inf, L1 and L2 norms are '\n 'currently implemented.')\n\n scaled_pert = eps * opt_pert\n return scaled_pert",
"def laplacian( graph : SpatialGraph, \n sparse : bool = False\n ) -> Union[np.ndarray, sp.spmatrix] :\n adj = adjacency(graph, sparse=sparse)\n dgr = sp.diags(np.array(adj.sum(1))) if sparse else np.diag(np.array(adj.sum(1)))\n return adj - dgr",
"def LML(self, theta):\n t = [exp(h) for h in theta]\n a = t[0]\n s = array(t[1:])\n K_xx = self.build_covariance(a, s*self.scale_lengths)\n\n try: # protection against singular matrix error crash\n sgn, ldet = slogdet(K_xx)\n if sgn is -1: print(' # WARNING # - negative determinant')\n L = dot( self.y.T, solve( K_xx, self.y ) ) + ldet\n except:\n L = 1e50\n return L",
"def LDL(A, d):\n n = shape(A)[0]\n L = array(eye(n))\n dg = zeros(n)\n dg[0] = A[0, 0]\n for k in range(1, n):\n m = reshape(array(A[:k, k].copy()), k)\n rforwardsolve(L[:k, :k], m, d)\n L[k, :k] = m/dg[:k]\n dg[k] = A[k, k] - dot(L[k, :k], m)\n return L, dg",
"def solve_l1(y, A_fun, AT_fun, lambda_l1, reshape_img_fun, show_img_progress=False, alpha=0.2, max_iter=100, solver_tol=1e-6):\n\n\n obj_lss = np.zeros(max_iter)\n x_zs = np.zeros(max_iter)\n u_norms = np.zeros(max_iter)\n times = np.zeros(max_iter)\n\n ATy = AT_fun(y)\n x_shape = ATy.shape\n d = np.prod(x_shape)\n\n def A_cgs_fun(x):\n x = np.reshape(x, x_shape, order='F')\n y = AT_fun(A_fun(x)) + alpha * x\n return vec(y)\n A_cgs = LinearOperator((d,d), matvec=A_cgs_fun, dtype='float')\n\n def compute_p_inv_A(b, z0):\n (z,info) = sp.sparse.linalg.cgs(A_cgs, vec(b), x0=vec(z0), tol=1e-3, maxiter=100)\n if info > 0:\n print('cgs convergence to tolerance not achieved')\n elif info <0:\n print('cgs gets illegal input or breakdown')\n z = np.reshape(z, x_shape, order='F')\n return z\n\n\n def A_cgs_fun_init(x):\n x = np.reshape(x, x_shape, order='F')\n y = AT_fun(A_fun(x))\n return vec(y)\n A_cgs_init = LinearOperator((d,d), matvec=A_cgs_fun_init, dtype='float')\n\n def compute_init(b, z0):\n (z,info) = sp.sparse.linalg.cgs(A_cgs_init, vec(b), x0=vec(z0), tol=1e-2)\n if info > 0:\n print('cgs convergence to tolerance not achieved')\n elif info <0:\n print('cgs gets illegal input or breakdown')\n z = np.reshape(z, x_shape, order='F')\n return z\n\n # initialize z and u\n z = compute_init(ATy, ATy)\n u = np.zeros(x_shape)\n\n\n plot_normalozer = matplotlib.colors.Normalize(vmin=0.0, vmax=1.0, clip=True)\n\n\n start_time = timeit.default_timer()\n\n for iter in range(max_iter):\n\n # x-update\n net_input = z+u\n Wzu, wbook = wavelet_transform(net_input)\n q = soft_threshold(Wzu, lambda_l1/alpha)\n x = inverse_wavelet_transform(q, wbook, x_shape)\n x = np.reshape(x, x_shape)\n\n # z-update\n b = ATy + alpha * (x - u)\n z = compute_p_inv_A(b, z)\n\n # u-update\n u += z - x;\n\n if show_img_progress == True:\n\n fig = plt.figure('current_sol')\n plt.gcf().clear()\n fig.canvas.set_window_title('iter %d' % iter)\n plt.subplot(1,3,1)\n plt.imshow(reshape_img_fun(np.clip(x, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('x')\n plt.subplot(1,3,2)\n plt.imshow(reshape_img_fun(np.clip(z, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('z')\n plt.subplot(1,3,3)\n plt.imshow(reshape_img_fun(np.clip(net_input, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('netin')\n plt.pause(0.00001)\n\n\n obj_ls = 0.5 * np.sum(np.square(y - A_fun(x)))\n x_z = np.sqrt(np.mean(np.square(x-z)))\n u_norm = np.sqrt(np.mean(np.square(u)))\n\n print('iter = %d: obj_ls = %.3e |x-z| = %.3e u_norm = %.3e' % (iter, obj_ls, x_z, u_norm))\n\n\n obj_lss[iter] = obj_ls\n x_zs[iter] = x_z\n u_norms[iter] = u_norm\n times[iter] = timeit.default_timer() - start_time\n\n if x_z < solver_tol:\n break\n\n infos = {'obj_lss': obj_lss, 'x_zs': x_zs, 'u_norms': u_norms,\n 'times': times, 'alpha':alpha, 'lambda_l1':lambda_l1,\n 'max_iter':max_iter, 'solver_tol':solver_tol}\n\n\n return (x, z, u, infos)",
"def _ls_solver(A, B, warm_start=None):\n # TODO - do conjugate gradient if n is too large\n return np.linalg.lstsq(A.T, B.T)[0].T",
"def experiment_linear_linf(_):\n # Attack epsilon is manually set according to the norm of the min-norm\n # solution found using cvxpy for d/n=10. That is max-margin=1/min-norm.\n # Min linf-norm solution found (norm=0.0422)\n # Min l2-norm solution found (norm=0.3411)\n # Min l1-norm solution found (norm=1.8497)\n # Min l4-norm solution found (norm=0.0002)\n # Min l1.5-norm solution found (norm=0.5274)\n return experiment_linear_lp(\n adv_norm_type='linf',\n dual_norm_type='l1',\n baseline_norm_types=['l2'],\n attack_step_dir='sign_grad')",
"def calc_lampam_sym(ss, constraints):\n if isinstance(ss, list):\n lampam = np.zeros((len(ss), 12), float)\n for index in range(len(ss)):\n lampam[index] = calc_lampam_sym(ss[index], constraints)\n return lampam\n if ss.ndim == 2 and ss.shape[0] > 1:\n lampam = np.zeros((ss.shape[0], 12), float)\n for index in range(ss.shape[0]):\n lampam[index] = calc_lampam_sym(ss[index], constraints)\n return lampam\n\n n_plies_in_panels = 2 * np.size(ss) # laminate ply count\n\n cos_sin = np.empty((4, n_plies_in_panels // 2), float)\n for ind in range(n_plies_in_panels // 2):\n cos_sin[:, ind] = constraints.cos_sin[\n constraints.ind_angles_dict[ss[ind]]].reshape((4, ))\n\n for_the_top = np.arange(n_plies_in_panels // 2)\n z_0 = np.ones(n_plies_in_panels // 2)\n z_2 = ((1 - n_plies_in_panels / 2) * z_0 + for_the_top) ** 3 \\\n - ((1 - n_plies_in_panels / 2) * z_0 + for_the_top - 1) ** 3\n lampam = np.array([\n (2 / n_plies_in_panels)*np.matmul(cos_sin, z_0),\n np.array([0, 0, 0, 0]),\n (8 / n_plies_in_panels**3)*np.matmul(cos_sin, z_2)]).reshape(12)\n return lampam",
"def generate_direct_solver(self, grid=None):\n if grid is None:\n # LOG.debug(\"Generate Solver for internal Spare Matrix: %s\" % self.sp_matrix)\n solver = spla.factorized(self.sp_matrix)\n else:\n # LOG.debug(\"Generate Solver for given Grid %s\" % (grid,))\n sp_matrix = self.to_sparse_matrix(grid, \"csc\")\n # LOG.debug(\" with Sparse Matrix: %s\" % sp_matrix.todense())\n # print(\"Jahier\\n\", sp_matrix.todense())\n # print(\"Jahier.shape\\n\", sp_matrix.todense().shape)\n solver = spla.factorized(sp_matrix)\n return solver",
"def Schechter_L(L, phi_s, L_s, alpha):\n\treturn phi_s * (L / L_s)**alpha * n.e**(-L / L_s) / L_s"
] |
[
"0.5904229",
"0.56168336",
"0.5549464",
"0.5510236",
"0.54761505",
"0.54374045",
"0.5416033",
"0.53865016",
"0.5377905",
"0.5372089",
"0.53265786",
"0.5315447",
"0.52730715",
"0.5262824",
"0.52390164",
"0.52042484",
"0.5179361",
"0.5165328",
"0.51650435",
"0.5147611",
"0.5143179",
"0.5130516",
"0.51260084",
"0.51134115",
"0.51095116",
"0.51057214",
"0.5093891",
"0.5088568",
"0.50818145",
"0.5080147"
] |
0.65866923
|
0
|
This is a wrapper for solving SGL problems on connected components of the solution and solving each block separately. See Witten, Friedman, Simon "New Insights for the Graphical Lasso" for details. It solves
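(The query text above is also truncated after "It solves". As a hedged aside on the decomposition step: the helper get_connected_components used by the code below is not shown here, but under the screening rule of Witten, Friedman, Simon it can be computed roughly as in the following sketch; the function name, its exact return format, and the use of scipy are assumptions.)

import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import connected_components

def get_connected_components_sketch(S, lambda1):
    # Join nodes i and j whenever |S_ij| > lambda1 off the diagonal.
    A = np.abs(S) > lambda1
    np.fill_diagonal(A, False)
    numC, labels = connected_components(csr_matrix(A), directed=False)
    allC = [np.where(labels == k)[0] for k in range(numC)]
    return numC, allC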
|
def block_SGL(S, lambda1, Omega_0, Theta_0=None, X_0=None, rho=1., max_iter=1000,
tol=1e-7, rtol=1e-3, stopping_criterion="boyd",
update_rho=True, verbose=False, measure=False):
assert Omega_0.shape == S.shape
assert S.shape[0] == S.shape[1]
assert lambda1 > 0
(p, p) = S.shape
if Theta_0 is None:
Theta_0 = Omega_0.copy()
if X_0 is None:
X_0 = np.zeros((p, p))
# compute connected components of S with lambda_1 threshold
numC, allC = get_connected_components(S, lambda1)
allOmega = list()
allTheta = list()
allX = list()
for i in range(numC):
C = allC[i]
# single node connected components have a closed form solution, see Witten, Friedman, Simon "NEW INSIGHTS FOR THE GRAPHICAL LASSO "
if len(C) == 1:
# we use the OFF-DIAGONAL l1-penalty, otherwise it would be 1/(S[C,C]+lambda1)
closed_sol = 1 / (S[C, C])
allOmega.append(closed_sol)
allTheta.append(closed_sol)
allX.append(np.array([0]))
# else solve Graphical Lasso for the corresponding block
else:
block_S = S[np.ix_(C, C)]
block_sol, block_info = ADMM_SGL(S=block_S, lambda1=lambda1, Omega_0=Omega_0[np.ix_(C, C)],
Theta_0=Theta_0[np.ix_(C, C)], X_0=X_0[np.ix_(C, C)], tol=tol, rtol=rtol,
stopping_criterion=stopping_criterion, update_rho=update_rho,
rho=rho, max_iter=max_iter, verbose=verbose, measure=measure)
allOmega.append(block_sol['Omega'])
allTheta.append(block_sol['Theta'])
allX.append(block_sol['X'])
# compute inverse permutation
per = np.hstack(allC)
per1 = invert_permutation(per)
# construct solution by applying inverse permutation indexing
sol = dict()
sol['Omega'] = block_diag(*allOmega)[np.ix_(per1, per1)]
sol['Theta'] = block_diag(*allTheta)[np.ix_(per1, per1)]
sol['X'] = block_diag(*allX)[np.ix_(per1, per1)]
return sol
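A minimal usage sketch for block_SGL, mirroring the ADMM_SGL example above; the sizes and the regularization value are illustrative assumptions, and block_SGL plus its helpers are assumed to be in scope as defined in the snippet above.

import numpy as np

rng = np.random.default_rng(1)
X = rng.standard_normal((500, 20))
S = np.cov(X, rowvar=False)

sol = block_SGL(S, lambda1=0.2, Omega_0=np.eye(S.shape[0]))
Theta = sol['Theta']  # assembled from the per-block solutions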
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def gel_solve(\n A,\n y,\n l_1,\n l_2,\n ns,\n b_init=None,\n block_solve_fun=block_solve_agd,\n block_solve_kwargs=None,\n max_cd_iters=None,\n rel_tol=1e-6,\n Cs=None,\n Is=None,\n verbose=False,\n):\n p = len(A)\n m = len(y)\n device = A[0].device\n dtype = A[0].dtype\n y = y.to(device, dtype)\n if block_solve_kwargs is None:\n block_solve_kwargs = dict()\n\n # Create initial values if not specified.\n if b_init is None:\n b_init = 0.0, torch.zeros(p, max(ns), device=device, dtype=dtype)\n\n if not isinstance(ns, torch.Tensor):\n ns = torch.tensor(ns)\n sns = ns.to(device, dtype).sqrt()\n a_1 = l_1 * sns\n ma_1 = m * a_1\n a_2 = 2 * l_2 * sns\n b_0, B = b_init\n b_0_prev, B_prev = b_0, B\n k = 1 # iteration number\n pbar_stats = {} # stats for the outer progress bar\n pbar = tqdm.tqdm(\n desc=\"Solving gel with CD (l_1 {:.2g}, l_2 {:.2g})\".format(l_1, l_2),\n disable=not verbose,\n )\n\n while True:\n # First minimize with respect to b_0. This has a closed form solution\n # given by b_0 = 1'@(y - sum_j A_j@b_j) / m.\n b_0 = (y - sum(A[j] @ B[j, : ns[j]] for j in range(p))).sum() / m\n\n # Now, minimize with respect to each b_j.\n for j in tqdm.trange(\n p, desc=\"Solving individual blocks\", disable=not verbose, leave=False\n ):\n r_j = y - b_0 - sum(A[k] @ B[k, : ns[k]] for k in range(p) if k != j)\n\n # Check if b_j must be set to 0. The condition is ||A_j'@r_j|| <=\n # m*a_1.\n if (A[j].t() @ r_j).norm(p=2) <= ma_1[j]:\n B[j] = 0\n else:\n # Otherwise, minimize. First make sure initial value is not 0.\n if len((B[j, : ns[j]].abs() < 1e-6).nonzero()) == ns[j]:\n B[j, : ns[j]] = 1e-3\n\n # Add C_j and I_j to the arguments if using Newton's method.\n if block_solve_fun is block_solve_newton:\n block_solve_kwargs[\"C_j\"] = Cs[j]\n block_solve_kwargs[\"I_j\"] = Is[j]\n\n B[j, : ns[j]] = block_solve_fun(\n r_j,\n A[j],\n a_1[j].item(),\n a_2[j].item(),\n m,\n B[j, : ns[j]],\n verbose=verbose,\n **block_solve_kwargs,\n )\n\n # Compute relative change in b.\n b_0_diff = b_0 - b_0_prev\n B_diff = B - B_prev\n delta_norm = (b_0_diff ** 2 + (B_diff ** 2).sum()).sqrt()\n b_norm = (b_0 ** 2 + (B ** 2).sum()).sqrt()\n\n pbar_stats[\"rel change\"] = \"{:.2g}\".format(delta_norm.item() / b_norm.item())\n pbar.set_postfix(pbar_stats)\n pbar.update()\n\n # Check max iterations exit criterion.\n if max_cd_iters is not None and k == max_cd_iters:\n break\n k += 1\n\n # Check tolerance exit criterion.\n if delta_norm.item() <= rel_tol * b_norm.item() and k > 2:\n break\n b_0_prev, B_prev = b_0, B\n\n pbar.close()\n return b_0.item(), B",
"def solve_l1(y, A_fun, AT_fun, lambda_l1, reshape_img_fun, show_img_progress=False, alpha=0.2, max_iter=100, solver_tol=1e-6):\n\n\n obj_lss = np.zeros(max_iter)\n x_zs = np.zeros(max_iter)\n u_norms = np.zeros(max_iter)\n times = np.zeros(max_iter)\n\n ATy = AT_fun(y)\n x_shape = ATy.shape\n d = np.prod(x_shape)\n\n def A_cgs_fun(x):\n x = np.reshape(x, x_shape, order='F')\n y = AT_fun(A_fun(x)) + alpha * x\n return vec(y)\n A_cgs = LinearOperator((d,d), matvec=A_cgs_fun, dtype='float')\n\n def compute_p_inv_A(b, z0):\n (z,info) = sp.sparse.linalg.cgs(A_cgs, vec(b), x0=vec(z0), tol=1e-3, maxiter=100)\n if info > 0:\n print('cgs convergence to tolerance not achieved')\n elif info <0:\n print('cgs gets illegal input or breakdown')\n z = np.reshape(z, x_shape, order='F')\n return z\n\n\n def A_cgs_fun_init(x):\n x = np.reshape(x, x_shape, order='F')\n y = AT_fun(A_fun(x))\n return vec(y)\n A_cgs_init = LinearOperator((d,d), matvec=A_cgs_fun_init, dtype='float')\n\n def compute_init(b, z0):\n (z,info) = sp.sparse.linalg.cgs(A_cgs_init, vec(b), x0=vec(z0), tol=1e-2)\n if info > 0:\n print('cgs convergence to tolerance not achieved')\n elif info <0:\n print('cgs gets illegal input or breakdown')\n z = np.reshape(z, x_shape, order='F')\n return z\n\n # initialize z and u\n z = compute_init(ATy, ATy)\n u = np.zeros(x_shape)\n\n\n plot_normalozer = matplotlib.colors.Normalize(vmin=0.0, vmax=1.0, clip=True)\n\n\n start_time = timeit.default_timer()\n\n for iter in range(max_iter):\n\n # x-update\n net_input = z+u\n Wzu, wbook = wavelet_transform(net_input)\n q = soft_threshold(Wzu, lambda_l1/alpha)\n x = inverse_wavelet_transform(q, wbook, x_shape)\n x = np.reshape(x, x_shape)\n\n # z-update\n b = ATy + alpha * (x - u)\n z = compute_p_inv_A(b, z)\n\n # u-update\n u += z - x;\n\n if show_img_progress == True:\n\n fig = plt.figure('current_sol')\n plt.gcf().clear()\n fig.canvas.set_window_title('iter %d' % iter)\n plt.subplot(1,3,1)\n plt.imshow(reshape_img_fun(np.clip(x, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('x')\n plt.subplot(1,3,2)\n plt.imshow(reshape_img_fun(np.clip(z, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('z')\n plt.subplot(1,3,3)\n plt.imshow(reshape_img_fun(np.clip(net_input, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('netin')\n plt.pause(0.00001)\n\n\n obj_ls = 0.5 * np.sum(np.square(y - A_fun(x)))\n x_z = np.sqrt(np.mean(np.square(x-z)))\n u_norm = np.sqrt(np.mean(np.square(u)))\n\n print('iter = %d: obj_ls = %.3e |x-z| = %.3e u_norm = %.3e' % (iter, obj_ls, x_z, u_norm))\n\n\n obj_lss[iter] = obj_ls\n x_zs[iter] = x_z\n u_norms[iter] = u_norm\n times[iter] = timeit.default_timer() - start_time\n\n if x_z < solver_tol:\n break\n\n infos = {'obj_lss': obj_lss, 'x_zs': x_zs, 'u_norms': u_norms,\n 'times': times, 'alpha':alpha, 'lambda_l1':lambda_l1,\n 'max_iter':max_iter, 'solver_tol':solver_tol}\n\n\n return (x, z, u, infos)",
"def solve(self):\n\n # Assign variables to each quantity being solved.\n r_lookup, lookup, num = {}, {}, 0\n for element in self.elements:\n if is_wire(element) and element is not self.ground:\n lookup[num] = element\n r_lookup[element] = num\n num += 1\n elif not is_cs(element) and element is not self.ground:\n lookup[num] = element\n r_lookup[element] = num\n num += 1\n\n # Set up the linear algebraic equation Ax=b\n A = np.zeros((num, num))\n b = np.zeros(num)\n for row, element in lookup.items():\n if is_wire(element) and element is not self.ground:\n for two_sided in element.attached:\n if is_cs(two_sided):\n if two_sided.pos is element:\n b[row] += -1 * two_sided.current\n else:\n b[row] += two_sided.current\n else:\n if two_sided.pos is element:\n flow = 1\n else:\n flow = -1\n A[row, r_lookup[two_sided]] = flow\n elif is_vs(element):\n check_connected(element)\n if element.pos is not self.ground:\n A[row, r_lookup[element.pos]] = 1\n if element.neg is not self.ground:\n A[row, r_lookup[element.neg]] = -1\n b[row] = element.voltage\n elif is_resistor(element):\n check_connected(element)\n if element.pos is not self.ground:\n A[row, r_lookup[element.pos]] = 1\n if element.neg is not self.ground:\n A[row, r_lookup[element.neg]] = -1\n A[row, r_lookup[element]] = -1 * element.resistance\n\n b = b.reshape((num, 1))\n try:\n x = np.linalg.solve(A, b)\n except np.linalg.LinAlgError:\n raise CircuitError('Insufficient information to solve circuit')\n\n # Assign values to all circuit components\n for i in range(num):\n item = lookup[i]\n if is_wire(item):\n item.potential = x[i, 0]\n elif isinstance(item, DualSided):\n item.current = x[i, 0]\n\n # Mark circuit as solved\n self.been_solved = True",
"def solver(u_init, eta_0, eta, eta_lin, T, H, L_lhs, L_rhs, alpha, gamma, B, D, C, ftol = 1e-3, max_iter = 5000, verbose = 0, nnls_max_iter=30):\n\n # Raise('NotImplementedError: only adjusted the arguments.')\n #Need to incorporate L_lhs into stacked and appropriate w_lin updates, u_update and eta_lin increments\n #precompute the expensive operation:\n lin_penalties = 1/np.sqrt(2*eta_lin)\n eta_T_H_L_stacked = scipy.sparse.vstack([T.multiply(1/np.sqrt(2*eta_0))] + [H[i].multiply(1/np.sqrt(2*eta[i])) for i in range(len(H))] + [L_lhs.multiply(lin_penalties[:,None])])\n #!!!!\n# premultiplied_lhs = eta_T_H_stacked.T.dot(eta_T_H_stacked).toarray()\n #!!!!\n u_prev = u_init + 1\n u = u_init\n count = 0\n obj_history = []\n relaxed_obj_history = [-1, 0.1] #just two initial values to enter the loop\n while np.abs((relaxed_obj_history[-2] - relaxed_obj_history[-1])/relaxed_obj_history[-2]) > ftol and count < max_iter:#np.linalg.norm(u - u_prev, np.inf) > 1e-3 and count < max_iter: #Maybe all of them stop changing\n start = time.time()\n \n u_prev = np.copy(u)\n w_0 = w_0_update(eta_0, u, T, alpha, B) \n w = w_update(u, H, gamma, D, C) \n w_lin = w_lin_update(u, L_lhs, L_rhs)\n# u = u_update(eta_0, eta, w_0, w, eta_T_H_stacked, nnls_max_iter=50)\n #!!!!\n # u = u_update(eta_0, eta, w_0, w, eta_T_H_L_stacked, nnls_max_iter=30)\n u = u_update(eta_0, eta, eta_lin, w_0, w, w_lin, eta_T_H_L_stacked, premultiplied_lhs = None, nnls_max_iter=nnls_max_iter)\n #!!!!\n count += 1 \n if count == 10:\n u_inf = np.copy(u)\n w_0_inf = w_0[:]\n w_inf = w[:]\n w_lin_inf = w_lin[:]\n if count > 10 and np.abs(cur_obj) > 1e+15: #HANDLE THIS BETTER!!!\n print('INFINITY! RETURNING u at the 10-th iteration to enter the feasibility loop')\n return u_inf, w_0_inf, w_inf, w_lin_inf, obj_history, relaxed_obj_history\n \n cur_obj = obj_u_opt_N_fixed(u, T, alpha, B)\n obj_history.append(cur_obj)\n cur_relaxed_obj = relaxed_obj_u_opt_N_fixed(u, w_0, w, w_lin, eta_0, eta, eta_lin, T, H, L_lhs, alpha, B)\n # relaxed_obj_u_opt_N_fixed(u, w_0, w, eta_0, eta, T, H, alpha, B)\n relaxed_obj_history.append(cur_relaxed_obj) \n \n stop = time.time()\n duration = stop-start\n \n if count%1 == 0 and verbose: \n stopping_criterion = np.abs((relaxed_obj_history[-2] - relaxed_obj_history[-1])/relaxed_obj_history[-2])\n print(' iter = {}, stopping criterion:{}, OBJ {}'.format(count, stopping_criterion, cur_obj))\n print(' This iteration took: {}'.format(duration))\n return u, w_0, w, w_lin, obj_history, relaxed_obj_history",
"def __solve_full_linear_problem(self):\n samples = []\n\n for news in self.news_pool:\n samples += [news.sampled_quality] * self.layout_slots\n\n self.full_C = np.array(samples) * self.full_lambdas\n\n linear_problem = opt.linprog(A_ub=self.full_A, b_ub=self.full_B, c=self.full_C)\n slots_assegnation_probabilities = []\n slot_counter = 0\n tmp_slot_probabilities = []\n while slot_counter < self.layout_slots:\n i = slot_counter\n while i < len(linear_problem.x):\n tmp_slot_probabilities.append(np.abs(linear_problem.x[i]))\n i += self.layout_slots\n slots_assegnation_probabilities.append(tmp_slot_probabilities.copy())\n tmp_slot_probabilities.clear()\n slot_counter += 1\n\n result = self.__de_randomize_LP(self.news_pool, slots_assegnation_probabilities, self.lp_rand_tech)\n\n return result",
"def solveLSM(self):\n ierr = c_int(1)\n self.fteik2d.fteik_solver2d_solveLSM(ierr)\n if (ierr.value != 0):\n print(\"Error solving eikonal equation\")\n return -1\n return 0\n #errorAll = 0\n #for i in range(self.nsrc):\n # isrc = i + 1\n # self.fteik2d.fteik_solver2d_solveSourceLSM(isrc, ierr)\n # if (ierr.value != 0):\n # print(\"Failed to solve for source %d\"%i+1)\n # errorAll = errorAll + 1\n #return errorAll",
"def solve(self):\n\n # Set up display header if verbose operation enabled\n if self.opt['Verbose']:\n hdr = 'Itn DFidX PriResX DuaResX DFidG' + \\\n ' ResG '\n print(hdr)\n print('-' * len(hdr))\n\n # Main iteration loop\n for n in range(self.opt['MaxMainIter']):\n\n # At start of 2nd iteration, set the numbers of inner\n # iterations for the X and G solvers from the options\n # object for the outer solver\n if n == 1:\n self.slvX.opt['MaxMainIter'] = self.opt['XslvIter']\n self.slvG.opt['MaxMainIter'] = self.opt['GslvIter']\n\n # Run the configured number of iterations of the X (CSC)\n # solver and assign the result to X\n self.X = self.slvX.solve()\n\n # Compute the sum of the subpixel shifts of X\n Xhs = np.sum(fftconv(self.H, self.X.squeeze(), axes=(0, 1)),\n axis=-1)\n\n # Set the convolution kernel in the deconvolution solver\n # to the sum of the subpixel shifts of X\n self.slvG.setG(Xhs)\n # Run the configured number of iterations of the G\n # (deconvolution) solver and crop the result to obtain the\n # updated g\n self.g = self.slvG.solve()[0:self.gshp[0], 0:self.gshp[1]]\n\n # Construct a new dictionary for the X (CSC) solver from\n # the updated psf g\n self.D, self.dn = self.getD(self.g)\n self.slvX.setdict(self.D[..., np.newaxis, np.newaxis, :])\n\n # Display iteration statistics if verbose operation enabled\n if self.opt['Verbose']:\n itsX = self.slvX.getitstat()\n itsG = self.slvG.getitstat()\n fmt = '%3d %.3e %.3e %.3e %.3e %.3e'\n tpl = (n, itsX.DFid[-1], itsX.PrimalRsdl[-1],\n itsX.DualRsdl[-1], itsG.DFid[-1], itsG.Rsdl[-1])\n print(fmt % tpl)\n\n # Return the (normalised) psf estimate g\n return self.g / np.linalg.norm(self.g)",
"def get_sol(self):",
"def test_linear_buckling_iso_CCSS(plot_static=False, plot_lb=False):\n # number of nodes\n nx = 5 # along x\n ny = 5 # along y\n\n # getting integration points\n nint = 4\n points, weights = get_points_weights(nint=nint)\n\n # geometry\n a = 3 # along x\n b = 3 # along y\n\n # material properties\n E = 200e9\n nu = 0.3\n laminaprop = (E, E, nu)\n stack = [0]\n h = 0.001\n lam = read_stack(stack=stack, plyt=h, laminaprop=laminaprop)\n\n # creating mesh\n x = np.linspace(0, a, nx)\n y = np.linspace(0, b, ny)\n xmesh, ymesh = np.meshgrid(x, y)\n\n # node coordinates and position in the global matrix\n ncoords = np.vstack((xmesh.T.flatten(), ymesh.T.flatten())).T\n nids = 1 + np.arange(ncoords.shape[0])\n nid_pos = dict(zip(nids, np.arange(len(nids))))\n\n # identifying nodal connectivity for plate elements\n # similar than Nastran's CQUAD4\n #\n # ^ y\n # |\n #\n # 4 ________ 3\n # | |\n # | | --> x\n # | |\n # |_______|\n # 1 2\n\n\n nids_mesh = nids.reshape(nx, ny)\n n1s = nids_mesh[:-1, :-1].flatten()\n n2s = nids_mesh[1:, :-1].flatten()\n n3s = nids_mesh[1:, 1:].flatten()\n n4s = nids_mesh[:-1, 1:].flatten()\n\n num_elements = len(n1s)\n print('num_elements', num_elements)\n\n N = DOF*nx*ny\n Kr = np.zeros(KC0_SPARSE_SIZE*num_elements, dtype=INT)\n Kc = np.zeros(KC0_SPARSE_SIZE*num_elements, dtype=INT)\n Kv = np.zeros(KC0_SPARSE_SIZE*num_elements, dtype=DOUBLE)\n KGr = np.zeros(KG_SPARSE_SIZE*num_elements, dtype=INT)\n KGc = np.zeros(KG_SPARSE_SIZE*num_elements, dtype=INT)\n KGv = np.zeros(KG_SPARSE_SIZE*num_elements, dtype=DOUBLE)\n init_k_KC0 = 0\n init_k_KG = 0\n\n plates = []\n for n1, n2, n3, n4 in zip(n1s, n2s, n3s, n4s):\n plate = BFSPlate2D()\n plate.n1 = n1\n plate.n2 = n2\n plate.n3 = n3\n plate.n4 = n4\n plate.c1 = DOF*nid_pos[n1]\n plate.c2 = DOF*nid_pos[n2]\n plate.c3 = DOF*nid_pos[n3]\n plate.c4 = DOF*nid_pos[n4]\n plate.ABD = lam.ABD\n plate.lex = a/(nx - 1)\n plate.ley = b/(ny - 1)\n plate.init_k_KC0 = init_k_KC0\n plate.init_k_KG = init_k_KG\n update_KC0(plate, points, weights, Kr, Kc, Kv)\n init_k_KC0 += KC0_SPARSE_SIZE\n init_k_KG += KG_SPARSE_SIZE\n plates.append(plate)\n\n KC0 = coo_matrix((Kv, (Kr, Kc)), shape=(N, N)).tocsc()\n\n # applying boundary conditions\n\n # locating nodes\n bk = np.zeros(KC0.shape[0], dtype=bool) # constrained DOFs, can be used to prescribe displacements\n\n x = ncoords[:, 0]\n y = ncoords[:, 1]\n\n # applying boundary conditions\n # simply supported\n check = isclose(x, 0) | isclose(x, a) | isclose(y, 0) | isclose(y, b)\n bk[2::DOF] = check\n check = isclose(x, 0) | isclose(x, a)\n bk[3::DOF] = check\n # point supports\n check = isclose(x, a/2) & (isclose(y, 0) | isclose(y, b))\n bk[0::DOF] = check\n check = isclose(y, b/2) & (isclose(x, 0) | isclose(x, a))\n bk[1::DOF] = check\n\n # unconstrained nodes\n bu = ~bk # logical_not\n\n # defining external force vector\n fext = np.zeros(KC0.shape[0], dtype=float)\n\n # applying unitary load along u at x=a\n # nodes at vertices get 1/2 the force\n for plate in plates:\n pos1 = nid_pos[plate.n1]\n pos2 = nid_pos[plate.n2]\n pos3 = nid_pos[plate.n3]\n pos4 = nid_pos[plate.n4]\n if isclose(x[pos3], a):\n Nxx = -1\n xi = +1\n elif isclose(x[pos1], 0):\n Nxx = +1\n xi = -1\n else:\n continue\n lex = plate.lex\n ley = plate.ley\n indices = []\n c1 = DOF*pos1\n c2 = DOF*pos2\n c3 = DOF*pos3\n c4 = DOF*pos4\n cs = [c1, c2, c3, c4]\n for ci in cs:\n for i in range(DOF):\n indices.append(ci + i)\n fe = np.zeros(4*DOF, dtype=float)\n for j in range(nint):\n eta = points[j]\n plate.update_Nu(xi, eta)\n Nu = 
np.asarray(plate.Nu)\n fe += ley/2*weights[j]*Nu*Nxx\n fext[indices] += fe\n\n Kuu = KC0[bu, :][:, bu]\n fextu = fext[bu]\n\n # static solver\n uu = spsolve(Kuu, fextu)\n u = np.zeros(KC0.shape[0], dtype=float)\n u[bu] = uu\n\n if plot_static:\n import matplotlib\n matplotlib.use('TkAgg')\n import matplotlib.pyplot as plt\n plt.gca().set_aspect('equal')\n uplot = u[0::DOF].reshape(nx, ny).T\n vplot = u[1::DOF].reshape(nx, ny).T\n print('u extremes', uplot.min(), uplot.max())\n print('v extremes', vplot.min(), vplot.max())\n levels = np.linspace(uplot.min(), uplot.max(), 300)\n plt.contourf(xmesh, ymesh, uplot, levels=levels)\n plt.colorbar()\n plt.show()\n\n # eigenvalue solver\n\n # getting integration points\n for plate in plates:\n update_KG(u, plate, points, weights, KGr, KGc, KGv)\n KG = coo_matrix((KGv, (KGr, KGc)), shape=(N, N)).tocsc()\n KGuu = KG[bu, :][:, bu]\n\n # solving modified generalized eigenvalue problem\n # Original: (KC0 + lambda*KG)*v = 0\n # Modified: (-1/lambda)*KC0*v = KG*v #NOTE here we find (-1/lambda)\n num_eigenvalues = 5\n eigvals, eigvecsu = eigsh(A=KGuu, k=num_eigenvalues, which='SM', M=Kuu,\n tol=1e-6, sigma=1., mode='cayley')\n eigvals = -1./eigvals\n eigvecs = np.zeros((KC0.shape[0], num_eigenvalues), dtype=float)\n eigvecs[bu, :] = eigvecsu\n\n if plot_lb:\n import matplotlib\n matplotlib.use('TkAgg')\n import matplotlib.pyplot as plt\n plt.gca().set_aspect('equal')\n mode = 0\n wplot = eigvecs[2::DOF, mode].reshape(nx, ny).T\n levels = np.linspace(wplot.min(), wplot.max(), 300)\n plt.contourf(xmesh, ymesh, wplot, levels=levels)\n plt.colorbar()\n plt.show()\n\n kc = eigvals[0]/(E*np.pi**2*(h/b)**2/(12*(1 - nu**2))*h)\n assert isclose(kc, 6.6, rtol=0.05)",
"def optimize(self):\n\n self.logger.info(\"Solving with Dynamic Slope Scaling Procedure in Julia :\")\n optimization_start = time.time()\n\n # 1. Preprocess for old network graph\n if self.old_network_graph is not None:\n\n # DSSP on old network\n old_network_obj = sum(list(nx.get_node_attributes(self.old_network_graph, config.BUILDING_CONSUMPTION_KEY).values()))-1e-5\n try:\n self.check_infeasibility(self.old_network_graph, old_network_obj)\n except DHCOptimizerException as e:\n e.data = \"Invalid existing network: \" + e.data\n raise e\n\n flows, obj_val = self.optimize_with_dssp_julia(self.old_network_graph, old_network_obj, set())\n self.logger.info(\"Optimization phase time: %.2fs\" % (time.time() - optimization_start))\n solution_old_graph = self.build_solution_graph(self.old_network_graph, flows)\n\n if self.modify_old_network:\n\n # Add max capacity on old edges\n self.old_capacity = deepcopy(flows)\n old_buildings = list(nx.get_node_attributes(self.old_network_graph, config.BUILDING_CONSUMPTION_KEY).values())\n for key in flows:\n if (key[1],key[0],0) not in self.old_capacity and key[1] not in old_buildings:\n self.old_capacity[(key[1],key[0],0)] = self.old_capacity[key]\n\n # Add Imaginary edges\n for edge in self.old_capacity:\n if self.optimization_graph.has_edge(*edge):\n\n # add nodes\n if not self.optimization_graph.has_node(config.IM_PREFIX+edge[0]):\n self.optimization_graph.add_node(config.IM_PREFIX+edge[0])\n self.optimization_graph.nodes[config.IM_PREFIX+edge[0]][config.GPD_GEO_KEY] = \\\n self.optimization_graph.nodes[edge[0]][config.GPD_GEO_KEY]\n if not self.optimization_graph.has_node(config.IM_PREFIX+edge[1]):\n self.optimization_graph.add_node(config.IM_PREFIX+edge[1])\n self.optimization_graph.nodes[config.IM_PREFIX+edge[1]][config.GPD_GEO_KEY] = \\\n self.optimization_graph.nodes[edge[1]][config.GPD_GEO_KEY]\n # add edges\n if not self.optimization_graph.has_edge(edge[0],config.IM_PREFIX+edge[0]):\n self.optimization_graph.add_edge(edge[0],config.IM_PREFIX+edge[0])\n if not self.optimization_graph.has_edge(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1]):\n self.optimization_graph.add_edge(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1])\n if not self.optimization_graph.has_edge(config.IM_PREFIX+edge[1],edge[1]):\n self.optimization_graph.add_edge(config.IM_PREFIX+edge[1],edge[1])\n\n # put cost\n self.optimization_graph.edges[(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1],0)][config.EDGE_COST_KEY] = \\\n self.optimization_graph.edges[(edge[0],edge[1],0)][config.EDGE_COST_KEY]\n self.optimization_graph.edges[(edge[0],edge[1],0)][config.EDGE_COST_KEY] = 1e-5\n self.optimization_graph.edges[(edge[0],config.IM_PREFIX+edge[0],0)][config.EDGE_COST_KEY] = 1e-5\n self.optimization_graph.edges[(config.IM_PREFIX+edge[1],edge[1],0)][config.EDGE_COST_KEY] = 1e-5\n\n else:\n # if we don't modify the old network, we have to change the capacity of the supplies\n already_consummed = {}\n for edge in solution_old_graph.edges():\n if solution_old_graph.nodes[edge[0]].get(config.NODE_TYPE_KEY) == config.SUPPLY_NODE_TYPE:\n already_consummed[edge[0]] = already_consummed.get(edge[0], 0) + \\\n solution_old_graph.edges[edge][config.SOLUTION_POWER_FLOW_KEY]\n for source in already_consummed:\n if already_consummed[source] <= self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY]:\n self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] -= already_consummed[source]\n self.network_objective -= already_consummed[source]\n else:\n 
self.network_objective -= self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY]\n self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] = 0\n\n # Remove edges from old network\n edges_to_remove = set()\n for e in self.optimization_graph.edges():\n if self.old_network_graph.has_edge(*e) or self.old_network_graph.has_edge(e[1],e[0]):\n edges_to_remove.add(e)\n self.optimization_graph.remove_edges_from(edges_to_remove)\n\n # Remove isolated buildings of optimization graph\n isolated_to_remove = set()\n for e in self.old_network_graph.edges():\n if e[0] in self.old_network_graph.nodes() and \\\n self.optimization_graph.nodes[e[1]].get(config.NODE_TYPE_KEY) == config.BUILDING_NODE_TYPE:\n isolated_to_remove.add(e)\n self.optimization_graph.remove_edges_from(isolated_to_remove)\n\n # Remove buildings from old network\n for n, data in self.old_network_graph.nodes(data=True):\n if data.get(config.NODE_TYPE_KEY) == config.BUILDING_NODE_TYPE:\n self.optimization_graph.remove_node(n)\n\n # Re-link sources\n sources = set()\n for n, data in self.optimization_graph.nodes(data=True):\n if data.get(config.NODE_TYPE_KEY) == config.SUPPLY_NODE_TYPE:\n sources.add(n)\n source_graph = self.optimization_graph.subgraph(sources).copy()\n self.optimization_graph.remove_nodes_from(sources)\n gnx.remove_isolates(self.optimization_graph)\n node_filter = lambda n: self.optimization_graph.nodes.get(n,{}).get(config.NODE_TYPE_KEY) != config.BUILDING_NODE_TYPE\n gnx.spatial_points_merge(self.optimization_graph, source_graph.nodes_to_gdf(), node_filter=node_filter, inplace=True)\n\n # fill missing information\n gnx.fill_edges_missing_geometry_attributes(self.optimization_graph)\n gnx.fill_length_attribute(self.optimization_graph, config.EDGE_LENGTH_KEY, only_missing=True)\n gnx.fill_length_attribute(self.optimization_graph, config.EDGE_COST_KEY, only_missing=True)\n for e in self.optimization_graph.edges(keys=True):\n self.optimization_graph.edges[e][config.LEASTCOST_COEF_KEY] = \\\n self.optimization_graph.edges[e].get(config.LEASTCOST_COEF_KEY,0)\n\n\n\n # 2. Process the DSSP on optimization graph\n self.check_is_ready()\n self.check_infeasibility(self.optimization_graph, self.network_objective)\n\n if self.old_network_graph is not None and self.modify_old_network:\n old_buildings = set(nx.get_node_attributes(self.old_network_graph, config.BUILDING_CONSUMPTION_KEY).keys())\n else:\n old_buildings = set()\n flows, obj_val = self.optimize_with_dssp_julia(self.optimization_graph, self.network_objective, old_buildings,postprocess= (not self.modify_old_network))\n self.logger.info(\"Optimization phase time: %.2fs\" % (time.time() - optimization_start))\n self.solution_graph = self.build_solution_graph(self.optimization_graph, flows, self.connected)\n\n # 3. 
Postprocess for old network graph\n if self.old_network_graph is not None:\n \n if self.modify_old_network:\n # Put the right supply capacity and cost\n for edge in self.old_capacity:\n if self.solution_graph.has_edge(edge[0],edge[1]):\n self.solution_graph.edges[(edge[0],edge[1])][config.EDGE_COST_KEY] = \\\n self.optimization_graph.edges[(config.IM_PREFIX+edge[0],config.IM_PREFIX+edge[1],0)][config.EDGE_COST_KEY]\n \n # Remove imaginary edges\n imaginary_nodes_to_remove = set()\n nodes_to_relabel = {}\n for edge in self.solution_graph.edges():\n if str(edge[0]).startswith(config.IM_PREFIX) and str(edge[1]).startswith(config.IM_PREFIX):\n real_edge = edge[0][len(config.IM_PREFIX):],edge[1][len(config.IM_PREFIX):]\n self.old_capacity[(real_edge[0], real_edge[1], 0)] = pd.np.inf\n self.old_capacity[(real_edge[1], real_edge[0], 0)] = pd.np.inf\n if not self.solution_graph.has_edge(*real_edge):\n for i in range(2):\n nodes_to_relabel[edge[i]] = real_edge[i]\n else:\n self.solution_graph.edges[real_edge[0],real_edge[1]][config.SOLUTION_POWER_FLOW_KEY] += \\\n self.solution_graph.edges[edge].get(config.SOLUTION_POWER_FLOW_KEY,0)\n imaginary_nodes_to_remove.add(edge[0])\n imaginary_nodes_to_remove.add(edge[1])\n elif str(edge[0]).startswith(config.IM_PREFIX):\n imaginary_nodes_to_remove.add(edge[0])\n elif str(edge[1]).startswith(config.IM_PREFIX):\n imaginary_nodes_to_remove.add(edge[1])\n\n nx.relabel_nodes(self.solution_graph, nodes_to_relabel, copy=False)\n self.solution_graph.remove_nodes_from(list(imaginary_nodes_to_remove))\n for node in nodes_to_relabel.values():\n if self.solution_graph.has_edge(node, node):\n self.solution_graph.remove_edge(node, node)\n\n else:\n for source in nx.get_node_attributes(self.solution_graph, config.SUPPLY_POWER_CAPACITY_KEY):\n self.solution_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] += already_consummed.get(source,0)\n self.optimization_graph.nodes[source][config.SUPPLY_POWER_CAPACITY_KEY] += already_consummed.get(source,0)\n\n return flows, obj_val",
"def __solve_alternative_linear_problem(self, user):\n result = [0] * self.layout_slots\n de_rand_approach = \"greedy\"\n bins_per_category = []\n bins_cardinality = []\n for _ in range(len(self.categories)):\n bins_per_category.append([])\n bins_cardinality.append([])\n\n for cat in range(len(self.categories)):\n for _ in range(len(self.news_row_pivots) + 1):\n bins_per_category[cat].append([])\n bins_cardinality[cat].append([])\n for _ in range(len(self.news_column_pivots) + 1):\n bins_per_category[cat][-1].append([])\n bins_cardinality[cat][-1].append(0)\n\n for news in self.news_pool:\n category_index = self.categories.index(news.news_category)\n x, y = self.__compute_position_in_learning_matrix(user=user, news=news)\n bins_per_category[category_index][x][y].append(news)\n bins_cardinality[category_index][x][y] += 1\n\n index = 0\n bin_samples = []\n for cat in range(len(self.categories)):\n for x in range(len(self.news_row_pivots) + 1):\n for y in range(len(self.news_column_pivots) + 1):\n if (y == 0) and (x != 0):\n continue\n self.alt_B[index] = min(bins_cardinality[cat][x][y], self.layout_slots)\n index += 1\n try:\n selected_news = np.random.choice(bins_per_category[cat][x][y])\n self.sample_quality(selected_news, user, interest_decay=True)\n bin_samples += [selected_news.sampled_quality] * self.layout_slots\n except ValueError:\n bin_samples += [0] * self.layout_slots\n\n self.alt_C = np.array(list(np.array(self.alt_lambdas) * bin_samples)) * -1\n linear_problem = opt.linprog(A_ub=self.alt_A, b_ub=self.alt_B, c=self.alt_C)\n\n # FOR EACH SLOT, ISOLATES THE CORRESPONDING VARIABLES\n slots_assegnation_probabilities = []\n slot_counter = 0\n tmp_slot_probabilities = []\n while slot_counter < self.layout_slots:\n i = slot_counter\n while i < len(linear_problem.x):\n tmp_slot_probabilities.append(np.abs(linear_problem.x[i]))\n i += self.layout_slots\n slots_assegnation_probabilities.append(tmp_slot_probabilities.copy())\n tmp_slot_probabilities.clear()\n slot_counter += 1\n\n slot_promenances = self.real_slot_promenances.copy()\n slot_promenances_norm = np.array(slot_promenances) / sum(slot_promenances)\n slots_nr = [s for s in range(0, self.layout_slots)]\n for i in range(self.layout_slots):\n if de_rand_approach == \"ordered\":\n k = i\n elif (de_rand_approach == \"greedy\") or (de_rand_approach == \"greedy_max\"):\n k = np.argmax(slot_promenances)\n slot_promenances[k] = 0\n elif de_rand_approach == \"randomized\":\n k = np.random.choice(slots_nr, p=slot_promenances_norm)\n slot_promenances[k] = 0\n else:\n raise RuntimeError(\"De_randomization approach not recognized. Try either 'ordered', 'greedy', \"\n \"'randomized' or 'greedy_max'.\")\n\n target_slot_probabilities = [x for x in slots_assegnation_probabilities[k]]\n target_slot_probabilities_norm = np.array(target_slot_probabilities) / sum(target_slot_probabilities)\n if de_rand_approach == \"greedy_max\":\n assigning_bin_index = np.argmax(target_slot_probabilities)\n cat_index = int(assigning_bin_index / self.num_of_bins)\n x = self.bins_for_position[int(assigning_bin_index)][0]\n y = self.bins_for_position[int(assigning_bin_index)][1]\n\n else:\n assigning_bin = np.random.choice([x for x in range(len(slots_assegnation_probabilities[k]))], p=target_slot_probabilities_norm)\n cat_index = int(assigning_bin / self.num_of_bins)\n x = self.bins_for_position[int(assigning_bin)][0]\n y = self.bins_for_position[int(assigning_bin)][1]\n\n result[k] = np.random.choice(bins_per_category[cat_index][x][y])\n\n return result",
"def incompatibility_solve_cg(self, useAMS=True):\n \n zero = Expression((\"0.0\", \"0.0\", \"0.0\"), degree=1)\n bc = DirichletBC(self.PN, zero, DirichletBoundary())\n \n T1 = Function(self.PN) # Solution for the curl curl problem\n T2 = Function(self.PN) # Solution for the curl curl problem\n T3 = Function(self.PN) # Solution for the curl curl problem\n\n if useAMS:\n \n # Set operator for the linear solver\n L_X = inner(self.strain_diff_1, curl(self.inc_v0))*dx\n A_X, b_X = assemble_system(self.a_X, L_X, bc)\n self.ksp_X.setOperators(as_backend_type(A_X).mat())\n self.ksp_X.solve(as_backend_type(b_X).vec(), as_backend_type(T1.vector()).vec())\n\n # Show linear solver details\n self.ksp_X.view()\n\n # Solve 2nd system\n L_X = inner(self.strain_diff_2, curl(self.inc_v0))*dx\n A_X, b_X = assemble_system(self.a_X, L_X, bc)\n self.ksp_X.setOperators(as_backend_type(A_X).mat())\n self.ksp_X.solve(as_backend_type(b_X).vec(), as_backend_type(T2.vector()).vec())\n\n # Solve 3nd system\n L_X = inner(self.strain_diff_3, curl(self.inc_v0))*dx\n A_X, b_X= assemble_system(self.a_X, L_X, bc)\n self.ksp_X.setOperators(as_backend_type(A_X).mat())\n self.ksp_X.solve(as_backend_type(b_X).vec(), as_backend_type(T3.vector()).vec())\n \n else:\n\n ### vanilla CG works with potential as RHS\n\n L_X = inner(self.strain_diff_1, curl(self.inc_v0))*dx\n solve(self.a_X == L_X, T1, bc, \n solver_parameters={'linear_solver': 'cg', 'preconditioner': 'jacobi'}) \n\n L_X = inner(self.strain_diff_2, curl(self.inc_v0))*dx\n solve(self.a_X == L_X, T2, bc, \n solver_parameters={'linear_solver': 'cg', 'preconditioner': 'jacobi'}) \n\n L_X = inner(self.strain_diff_3, curl(self.inc_v0))*dx\n solve(self.a_X == L_X, T3, bc, \n solver_parameters={'linear_solver': 'cg', 'preconditioner': 'jacobi'})\n\n return project( self.X_0(curl(T1),curl(T2),curl(T3)), \n self.TFS, solver_type=\"cg\", preconditioner_type=\"ilu\")",
"def g_solving_subproblem_of_LR(self,vehicle_id):\r\n global_LB=-10000\r\n global_UB=10000\r\n iteration_for_RSP=20\r\n optimal_solution_for_RSP=None\r\n optimal_value_y=0\r\n self.multiplier_v=0.5\r\n\r\n #solve the expected shortest path problem\r\n self.g_dynamic_programming_algorithm(vehicle_id, 4)\r\n #obtain the variance\r\n y_=self.g_ending_state_vector[vehicle_id].VSStateVector[0].Primal_Label_cost_variance\r\n\r\n for k in range(iteration_for_RSP):\r\n # print(k)\r\n LB=0\r\n # step 2: solve decomposed dual problems\r\n # Part I: subproblem of x\r\n self.g_dynamic_programming_algorithm(vehicle_id, 2)\r\n LB+=self.g_ending_state_vector[vehicle_id].VSStateVector[0].Label_cost_for_lagrangian\r\n\r\n # Part II: subproblem of y\r\n obj_of_y_ = self.reliability * (y_) ** 0.5 - self.multiplier_v * y_\r\n if obj_of_y_ > 0:\r\n y = 0\r\n LB += 0\r\n else:\r\n y = y_\r\n LB += obj_of_y_\r\n # generate an upper bound\r\n variance = self.g_ending_state_vector[vehicle_id].VSStateVector[0].Primal_Label_cost_variance\r\n Label_cost_for_lagrangian_mean=self.g_ending_state_vector[vehicle_id].VSStateVector[0].Label_cost_for_lagrangian_mean\r\n UB=Label_cost_for_lagrangian_mean+self.reliability*(variance)**0.5\r\n\r\n # print(\"UB:{}\".format(UB))\r\n # print(\"LB:{}\".format(LB))\r\n\r\n # UB and LB update\r\n if LB > global_LB:\r\n global_LB = LB\r\n optimal_solution_for_RSP = self.g_ending_state_vector[vehicle_id].VSStateVector\r\n optimal_value_y = y\r\n\r\n if UB < global_UB:\r\n global_UB = UB\r\n\r\n\r\n # step 3: update multipliers\r\n if variance-y!= 0:\r\n self.multiplier_v+= (global_UB - LB) / (variance-y)\r\n # if self.multiplier_v<0:\r\n # self.multiplier_v=1\r\n # print(self.multiplier_v)\r\n\r\n # step 4: termination condition test\r\n if global_UB != 0:\r\n gap = abs((global_UB-global_LB) / global_UB)\r\n # print(gap)\r\n if gap < 0.02:\r\n print(\"iteration{}\".format(k + 1))\r\n print(self.multiplier_v)\r\n print(global_LB, global_UB)\r\n return optimal_solution_for_RSP, optimal_value_y,global_LB,global_UB\r\n else:\r\n if global_UB - global_LB == 0:\r\n print(\"iteration{}\".format(k + 1))\r\n print(self.multiplier_v)\r\n print(global_LB, global_UB)\r\n return optimal_solution_for_RSP,optimal_value_y,global_LB,global_UB\r\n\r\n if k == iteration_for_RSP - 1:\r\n print(\"iteration{}\".format(k + 1))\r\n print(self.multiplier_v)\r\n print(global_LB, global_UB)\r\n return optimal_solution_for_RSP,optimal_value_y,global_LB,global_UB",
"def solve():\n\n s, g, e = make_lattice(21)\n stack = deque([[e]])\n vals = {s: 1}\n max_n = 0\n\n while stack:\n max_n = max(max_n, len(stack))\n n, *p = stack.pop()\n for c in g.get_connected(n):\n if c > n:\n continue\n if c in vals:\n propagate(c, [n] + p, vals)\n else:\n stack.append([c, n] + p)\n return vals[e]",
"def __solve_linear_problem(self, continuity_relaxation=True):\n result = [0] * self.layout_slots\n self.news_pool.sort(key=lambda x: (x.news_category, x.sampled_quality), reverse=True)\n LP_news_pool = []\n done_for_category = False\n category_count = 0\n prev_category = self.news_pool[0].news_category\n # First build a subset of news to easily handle the LP resolution\n for news in self.news_pool:\n if prev_category != news.news_category:\n if category_count < self.layout_slots:\n raise RuntimeWarning(\"Not enough news per category found. There should be at least \" +\n str(self.layout_slots) + \" news with category = \" + prev_category + \", but \"\n \"only \" + str(category_count) + \"are present. The allocation maybe \"\n \"sub-optimal.\")\n category_count = 0\n done_for_category = False\n prev_category = news.news_category\n if not done_for_category:\n LP_news_pool.append(news)\n category_count += 1\n if category_count == self.layout_slots:\n done_for_category = True\n\n # If not all the required news are present, add some other news at random.\n while len(LP_news_pool) < len(self.categories) * self.layout_slots:\n random_news = np.random.choice(self.news_pool)\n if random_news not in LP_news_pool:\n LP_news_pool.append(random_news)\n\n LP_news_pool.sort(key=lambda x: x.news_category, reverse=False)\n thetas = []\n # Compute the vector of coefficients for the LP objective function\n for news in LP_news_pool:\n thetas += [news.sampled_quality] * self.layout_slots\n self.C = list(np.array(thetas) * np.array(self.lambdas))\n\n # Then solve an LP or an ILP\n if continuity_relaxation:\n linear_problem = opt.linprog(A_ub=self.A, b_ub=self.B, c=self.C)\n slots_assegnation_probabilities = []\n slot_counter = 0\n tmp_slot_probabilities = []\n while slot_counter < self.layout_slots:\n i = slot_counter\n while i < len(linear_problem.x):\n tmp_slot_probabilities.append(np.abs(linear_problem.x[i]))\n i += self.layout_slots\n slots_assegnation_probabilities.append(tmp_slot_probabilities.copy())\n tmp_slot_probabilities.clear()\n slot_counter += 1\n\n self.measure_allocation_diversity_bounds_errors(slots_assegnation_probabilities, LP_news_pool, iter=10)\n\n result = self.__de_randomize_LP(LP_news_pool, slots_assegnation_probabilities, self.lp_rand_tech)\n\n else:\n # INITIALIZES AN INTEGER LINEAR PROBLEM\n ILP = LpProblem(\"News_ILP\", LpMaximize)\n ILP_variables = []\n\n for cat in range(len(self.categories)):\n for j in range(self.layout_slots):\n for s in range(self.layout_slots):\n ILP_variables.append(LpVariable(name=str(cat) + \"_\" + str(j) + \"_\" + str(s), lowBound=0, upBound=1, cat=\"Binary\"))\n\n # Objective function addition to the problem\n C = list(np.array(self.C) * -1)\n ILP += lpSum([C[i] * ILP_variables[i] for i in range(len(self.C))])\n\n # Category constraints addition to the problem\n for i in range(len(self.categories)):\n ILP += lpSum([self.A[i][j] * ILP_variables[j] for j in range(len(self.C))]) <= self.B[i]\n\n # Slots capacity constraints addition to the problem\n for i in range(len(self.categories), len(self.categories) + self.layout_slots):\n ILP += lpSum([self.A[i][j] * ILP_variables[j] for j in range(len(self.C))]) <= self.B[i]\n\n # News capacity constraints addition to the problem\n for i in range(len(self.categories) + self.layout_slots, len(self.categories) + self.layout_slots + len(self.categories) * self.layout_slots):\n ILP += lpSum([self.A[i][j] * ILP_variables[j] for j in range(len(self.C))]) <= self.B[i]\n\n ILP.solve()\n\n # FOR EACH SLOT, ISOLATES THE 
CORRESPONDING VARIABLES\n slots_assegnation_probabilities = []\n slot_counter = 0\n tmp_slot_probabilities = []\n while slot_counter < self.layout_slots:\n i = slot_counter\n while i < len(ILP.variables()):\n tmp_slot_probabilities.append(ILP.variables().__getitem__(i))\n i += self.layout_slots\n slots_assegnation_probabilities.append(tmp_slot_probabilities.copy())\n tmp_slot_probabilities.clear()\n slot_counter += 1\n\n # TAKES THE VARIABLES WHICH VALUE IS 1, THEN ALLOCATES THE CORRESPONDING NEWS IN THE RESULT PAGE\n for i in range(len(result)):\n for probabilities in slots_assegnation_probabilities[i]:\n if probabilities.varValue > 0:\n var_name = probabilities.name\n break\n indexes = var_name.split(\"_\")\n category_index = int(indexes[0])\n news_number = int(indexes[1])\n news_index = category_index * self.layout_slots + news_number\n result[i] = LP_news_pool[news_index]\n\n return result",
"def solve(self):",
"def potentialSolver4(self, w, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # Step 1: create *integer* array the same size as u \n x = np.zeros_like(self.phi,dtype=np.int)\n\n # Step 2: populate all non-boundary cells with running numbers from 1 to (n-2)^2\n x[1:-1,1:-1,1:-1] = np.arange(1,(self.ni-2)*(self.nj-2)*(self.nk-2)+1).reshape(self.ni-2,self.nj-2,self.nk-2)\n\n # Step 3: get indices of even (red) and odd (black) points\n ir, jr, kr = np.where((x>0) & (x%2 == 0)) # indices of red pts = indices of even numbers\n ib, jb, kb = np.where((x>0) & (x%2 == 1)) # indices of black pts = indices of odd numbers\n\n\n \n # solve potential\n for it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n \n \"\"\"\n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n # Red point update\n self.phi[ir,jr,kr] = (1-w)*self.phi[ir,jr,kr] + (1.0/6.0)*w*(self.phi[ir+1,jr,kr] + self.phi[ir-1,jr,kr] +\\\n self.phi[ir,jr+1,kr] + self.phi[ir,jr-1,kr] +\\\n self.phi[ir,jr,kr+1] + self.phi[ir,jr,kr-1] +\\\n (self.rho[ir,jr,kr]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n # Black point update\n self.phi[ib,jb,kb] = (1-w)*self.phi[ib,jb,kb] + (1.0/6.0)*w*(self.phi[ib+1,jb,kb] + self.phi[ib-1,jb,kb] +\\\n self.phi[ib,jb+1,kb] + self.phi[ib,jb-1,kb] +\\\n self.phi[ib,jb,kb+1] + self.phi[ib,jb,kb-1] +\\\n (self.rho[ib,jb,kb]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n -self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if 
(converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged",
"def solve(self):\n # check for jacobian and set it if present and to be used\n if self.use_sparse:\n if self._use_jac and hasattr(self.problem,'sparse_jac'):\n jac = self.problem.sparse_jac\n else:\n jac = None\n else:\n if self._use_jac and hasattr(self.problem,'jac'):\n jac = self.problem.jac\n else:\n jac = None\n \n # Initialize solver and solve \n \n solved = False\n local_min = False\n\n res = N.zeros(self.x0.__len__())\n while (not solved) and self.reg_count < 2:\n try:\n if self._use_fscale:\n self.solver.KINSOL_init(self.func,self.x0,self.dim,jac,self.constraints,self.use_sparse,self.verbosity,self.norm_of_res,self.reg_param,self.fscale)\n else:\n self.solver.KINSOL_init(self.func,self.x0,self.dim,jac,self.constraints,self.use_sparse,self.verbosity,self.norm_of_res,self.reg_param,None)\n start = time.clock()\n res = self.solver.KINSOL_solve(not self._use_ls)\n stop = time.clock()\n self.exec_time += (stop - start)\n solved = True\n except KINError as error:\n if error.value == 42:\n # Try the heuristic\n if hasattr(self.problem, 'get_heuristic_x0'):\n print \"----------------------------------------------------\"\n print \" Solver stuck with zero step-length.\"\n print \"----------------------------------------------------\"\n print \"The following variables have start value zero\"\n print \"and min set to zero causing the zero step-lenght.\"\n print \"These settings are either set by default or by user.\"\n print \"\"\n\n self.x0 = self.problem.get_heuristic_x0()\n self.reg_count += 1\n \n print \"\"\n print \"This setting (start and min to zero) can often\"\n print \"cause problem when initializing the system. \"\n print \"\"\n print \"To avoid this the above variables have\"\n print \"their start attributes reset to one.\"\n print \"\"\n print \"Trying to solve the system again...\"\n else:\n raise KINSOL_Exception(\"Regularization failed due to constraints, tried getting heuristic initial guess but failed.\")\n \n\n elif (error.value == 2):\n print \"---------------------------------------------------------\"\n print \"\"\n print \" !!! WARNING !!!\"\n print \"\"\n print \" KINSOL has returned a result but the algorithm has converged\"\n print \" to a local minima, the initial values are NOT consistant!\"\n print \"\"\n print \"---------------------------------------------------------\"\n solved = True\n local_min = True\n else:\n # Other error, send onward as exception\n self.problem.check_constraints(res)\n raise KINSOL_Exception(error.msg[error.value])\n \n if not solved:\n self.solver.Free_KINSOL()\n raise KINSOL_Exception(\"Algorithm exited solution loop without finding a solution, please contact Assimulo support.\")\n\n if self.check_with_model:\n self.problem.check_constraints(res)\n if not local_min:\n print \"Problem sent to KINSOL solved.\"\n \n return res",
"def optimize(o_molsys, computer):\n logger = logging.getLogger(__name__)\n\n # Take care of some initial variable declarations\n step_number = 0 # number of steps taken. Partial. IRC alg uses two step counters\n irc_step_number = None\n total_steps_taken = 0\n H = 0 # hessian in internals\n\n # Try to optimize one structure OR set of IRC points. OptError and all Exceptions caught below.\n try:\n\n # Prepare for multiple IRC computation\n if op.Params.opt_type == \"IRC\":\n irc_step_number = 0\n IRCdata.history = IRCdata.IRCdata()\n IRCdata.history.set_atom_symbols(o_molsys.atom_symbols)\n # Why do we need to have IRCdata.history store its own copy?\n IRCdata.history.set_step_size_and_direction(op.Params.irc_step_size, op.Params.irc_direction)\n logger.debug(\"\\tIRC data object created\\n\")\n\n converged = False\n # o_molsys = make_internal_coords(o_molsys)\n if not o_molsys.intcos_present:\n make_internal_coords(o_molsys)\n logger.debug(\"Molecular systems after make_internal_coords:\")\n logger.debug(str(o_molsys))\n\n # following loop may repeat over multiple algorithms OR over IRC points\n while not converged:\n try:\n # if optimization coordinates are absent, choose them. Could be erased after AlgError\n if not o_molsys.intcos_present:\n make_internal_coords(o_molsys)\n logger.debug(\"Molecular systems after make_internal_coords:\")\n logger.debug(str(o_molsys))\n\n logger.info(\"\\tStarting optimization algorithm.\\n\")\n logger.info(str(o_molsys))\n\n # Do special initial step-0 for each IRC point.\n # For IRC point, we form/get the Hessian now.\n\n if op.Params.opt_type == \"IRC\":\n if irc_step_number == 0:\n # Step along lowest eigenvector of mass-weighted Hessian.\n logger.info(\"\\tBeginning IRC from the transition state.\\n\")\n logger.info(\"\\tStepping along lowest Hessian eigenvector.\\n\")\n\n H, gX = get_pes_info(H, computer, o_molsys, step_number, irc_step_number)\n logger.debug(print_mat_string(H, title=\"Transformed Hessian in internals.\"))\n\n # Add the transition state as the first IRC point\n q_0 = o_molsys.q_array()\n x_0 = o_molsys.geom\n f_q = o_molsys.gradient_to_internals(gX, -1.0)\n f_x = np.multiply(-1, gX)\n E = computer.energies[-1]\n\n IRCdata.history.add_irc_point(0, q_0, x_0, f_q, f_x, E)\n irc_step_number += 1\n\n # Lowest eigenvector of mass-weighted Hessian.\n G = o_molsys.Gmat(massWeight=True)\n G_root = symm_mat_root(G)\n H_q_m = np.dot(np.dot(G_root, H), G_root.T)\n vM = lowest_eigenvector_symm_mat(H_q_m)\n logger.info(print_array_string(vM, title=\"Lowest evect of H_q_M\"))\n\n # Un mass-weight vector.\n G_root_inv = symm_mat_inv(G_root, redundant=True)\n v = np.dot(G_root_inv, vM)\n\n if op.Params.irc_direction == \"BACKWARD\":\n v *= -1\n # end if IRCStepNumber == 0\n\n else: # Step along gradient.\n logger.info(\"\\tBeginning search for next IRC point.\\n\")\n logger.info(\"\\tStepping along gradient.\\n\")\n v = IRCdata.history.f_q()\n irc_step_number += 1\n\n IRCfollowing.compute_pivot_and_guess_points(o_molsys, v, op.Params.irc_step_size)\n # end if 'IRC'\n\n for step_number in range(op.Params.alg_geom_maxiter):\n header = f\"{'----------------------------':^74}\"\n header += f\"\\n{'Taking A Step: Step Number %d' % (step_number + 1):^90}\"\n header += f\"\\n{'----------------------------':^90}\"\n logger.info(header)\n total_steps_taken += 1\n\n H, gX = get_pes_info(H, computer, o_molsys, step_number, irc_step_number)\n E = computer.energies[-1]\n\n logger.info(\"%s\", print_geom_grad(o_molsys.geom, gX))\n\n if op.Params.print_lvl >= 
4:\n hessian.show(H, o_molsys)\n\n f_q = o_molsys.gradient_to_internals(gX, -1.0)\n o_molsys.apply_external_forces(f_q, H, step_number)\n o_molsys.project_redundancies_and_constraints(f_q, H)\n o_molsys.q_show()\n\n if op.Params.test_B:\n testB.test_b(o_molsys)\n if op.Params.test_derivative_B:\n testB.test_derivative_b(o_molsys)\n\n # Check if forces indicate we are approaching minimum.\n if op.Params.opt_type == \"IRC\" and irc_step_number > 2:\n if IRCdata.history.test_for_irc_minimum(f_q):\n logger.info(\"A minimum has been reached on the IRC. Stopping here.\\n\")\n raise IRCendReached()\n\n logger.info(print_array_string(f_q, title=\"Internal forces in au:\"))\n\n history.oHistory.append(o_molsys.geom, E, f_q) # Save initial step info.\n history.oHistory.nuclear_repulsion_energy = computer.trajectory[-1][\"properties\"][\n \"nuclear_repulsion_energy\"\n ]\n\n # Analyze previous step performance; adjust trust radius accordingly.\n # Returns true on first step (no history)\n lastStepOK = history.oHistory.current_step_report()\n\n # If step was bad, take backstep here or raise exception.\n if lastStepOK:\n history.oHistory.consecutiveBacksteps = 0\n else:\n # Don't go backwards until we've gone a few iterations.\n if len(history.oHistory.steps) < 5:\n logger.info(\"\\tNear start of optimization, so ignoring bad step.\\n\")\n elif history.History.consecutiveBacksteps < op.Params.consecutiveBackstepsAllowed:\n history.History.consecutiveBacksteps += 1\n logger.info(\n \"\\tCalling for consecutive backstep number %d.\\n\" % history.History.consecutiveBacksteps\n )\n stepAlgorithms.take_step(o_molsys, E, f_q, H, stepType=\"BACKSTEP\")\n logger.info(\"\\tStructure for next step (au):\\n\")\n o_molsys.show_geom()\n continue\n elif op.Params.dynamic_level == 0: # not using dynamic level, so ignore.\n logger.info(\"\\tNo more backsteps allowed.\" + \"Dynamic level is off.\\n\")\n pass\n else:\n raise AlgError(\"Bad step, and no more backsteps allowed.\")\n\n if op.Params.opt_type == \"IRC\":\n DqGuess = IRCdata.history.q_pivot() - IRCdata.history.q()\n Dq = IRCfollowing.dq_irc(o_molsys, E, f_q, H, op.Params.irc_step_size, DqGuess)\n else: # Displaces and adds step to history.\n Dq = stepAlgorithms.take_step(o_molsys, E, f_q, H, op.Params.step_type, computer)\n\n if op.Params.opt_type == \"IRC\":\n converged = convcheck.conv_check(\n step_number,\n o_molsys,\n Dq,\n f_q,\n computer.energies,\n IRCdata.history,\n )\n logger.info(\"\\tConvergence check returned %s.\" % converged)\n\n if converged:\n q_irc_point = o_molsys.q_array()\n forces_irc_point = o_molsys.gradient_to_internals(gX, -1.0)\n lineDistStep = IRCfollowing.calc_line_dist_step(o_molsys)\n arcDistStep = IRCfollowing.calc_arc_dist_step(o_molsys)\n\n IRCdata.history.add_irc_point(\n irc_step_number,\n q_irc_point,\n o_molsys.geom,\n forces_irc_point,\n np.multiply(-1, gX),\n computer.energies[-1],\n lineDistStep,\n arcDistStep,\n )\n IRCdata.history.progress_report()\n\n else: # not IRC.\n converged = convcheck.conv_check(step_number, o_molsys, Dq, f_q, computer.energies)\n logger.info(\"\\tConvergence check returned %s\" % converged)\n\n if converged: # changed from elif when above if statement active\n logger.info(\"\\tConverged in %d steps!\" % (step_number + 1))\n logger.info(\"\\tFinal energy is %20.13f\" % E)\n logger.info(\"\\tFinal structure (Angstroms): \\n\" + o_molsys.show_geom())\n break # break out of step_number loop\n\n logger.info(\"\\tStructure for next step (au):\\n\" + o_molsys.show_geom())\n\n # Hard quit if too many 
total steps taken (inc. all IRC points and algorithms).\n\n if total_steps_taken == op.Params.geom_maxiter:\n logger.error(\n \"\\tTotal number of steps (%d) exceeds maximum allowed (%d).\\n\"\n % (total_steps_taken, op.Params.geom_maxiter)\n )\n raise OptError(\n \"Maximum number of steps exceeded: {}.\".format(op.Params.geom_maxiter),\n \"OptError\",\n )\n\n else: # Associated with above for loop, executes if break is not reached\n logger.error(\n \"\\tNumber of steps (%d) exceeds maximum for algorithm (%d).\\n\"\n % (step_number + 1, op.Params.alg_geom_maxiter)\n )\n raise AlgError(\"Maximum number of steps exceeded for algorithm\")\n\n # For IRC, save and queue up for the optimization of the next point.\n if op.Params.opt_type == \"IRC\":\n if irc_step_number == op.Params.irc_points:\n logger.info(f\"\\tThe requested {op.Params.irc_points} IRC points have been obtained.\")\n raise IRCendReached()\n else:\n logger.info(\"\\tStarting search for next IRC point.\")\n logger.info(\"\\tClearing old constrained optimization history.\")\n history.oHistory.reset_to_most_recent() # delete old steps\n converged = False\n\n # Catch non-fatal algorithm errors and try modifying internals,\n # changing run-levels, optimization parameters, etc. and start over again.\n except AlgError as AF:\n logger.error(\"\\n\\tCaught AlgError exception\\n\")\n eraseIntcos = False\n\n if AF.linearBends:\n # New linear bends detected; Add them, and continue at current level.\n # from . import bend # import not currently being used according to IDE\n for l in AF.linearBends:\n if l.bend_type == \"LINEAR\": # no need to repeat this code for \"COMPLEMENT\"\n iF = addIntcos.check_fragment(l.atoms, o_molsys)\n F = o_molsys.fragments[iF]\n intcosMisc.remove_old_now_linear_bend(l.atoms, F.intcos)\n F.add_intcos_from_connectivity()\n eraseHistory = True\n elif op.Params.dynamic_level == op.Params.dynamic_level_max:\n logger.critical(\"\\n\\t Current algorithm/dynamic_level is %d.\\n\" % op.Params.dynamic_level)\n logger.critical(\"\\n\\t Alternative approaches are not available or turned on.\\n\")\n raise OptError(\"Maximum dynamic_level reached.\")\n else:\n op.Params.dynamic_level += 1\n logger.warning(\"\\n\\t Increasing dynamic_level algorithm to %d.\\n\" % op.Params.dynamic_level)\n logger.warning(\"\\n\\t Erasing old history, hessian, intcos.\\n\")\n eraseIntcos = True\n eraseHistory = True\n op.Params.updateDynamicLevelParameters(op.Params.dynamic_level)\n\n if eraseIntcos:\n logger.warning(\"\\n\\t Erasing coordinates.\\n\")\n for f in o_molsys.fragments:\n del f.intcos[:]\n\n if eraseHistory:\n logger.warning(\"\\n\\t Erasing history.\\n\")\n step_number = 0\n del H\n H = 0\n del history.oHistory[:] # delete steps in history\n history.oHistory.stepsSinceLastHessian = 0\n history.oHistory.consecutiveBacksteps = 0\n\n # print summary\n logger.info(\"\\tOptimization Finished\\n\" + history.oHistory.summary_string())\n\n if op.Params.opt_type == \"linesearch\":\n logger.info(\"\\tObtaining gradient at the final geometry for line-search optimization\\n\")\n # Calculate gradient to show user\n gX = computer.compute(o_molsys.geom, driver=\"gradient\", return_full=False)\n del gX\n qc_output = prepare_opt_output(o_molsys, computer, error=None)\n\n del H\n del history.oHistory[:]\n o_molsys.clear()\n del op.Params\n return qc_output\n\n # Expect to hit this error. 
not an issue\n except IRCendReached:\n\n logger.info(\"\\t\\tFinal IRC Point\\n%s\", o_molsys)\n logger.info(\"Tabulating rxnpath results.\")\n IRCdata.history.progress_report()\n np.multiply(-1, IRCdata.history.f_x(-1))\n rxnpath = IRCdata.history.rxnpath_dict()\n\n logger.info(rxnpath)\n\n qc_output = prepare_opt_output(o_molsys, computer, rxnpath=rxnpath, error=None)\n\n # delete some stuff\n del H\n del history.oHistory[:]\n o_molsys.clear()\n del op.Params\n return qc_output\n\n # Fatal error. Cannot proceed.\n except OptError as error:\n logger.critical(\"\\tA critical optimization-specific error has occured.\\n\")\n logger.critical(\"\\tResetting all optimization options for potential queued jobs.\\n\")\n logger.exception(\"Error Type: \" + str(type(error)))\n logger.exception(\"Error caught:\" + str(error))\n # Dump histories if possible\n try:\n logging.debug(\"\\tDumping history: Warning last point not converged.\\n\" + history.oHistory.summary_string())\n if op.Params.opt_type == \"IRC\":\n logging.info(\"\\tDumping IRC points completed\")\n IRCdata.history.progress_report()\n del history.oHistory[:]\n except NameError:\n pass\n\n rxnpath = None\n if op.Params.opt_type == \"IRC\":\n rxnpath = IRCdata.history.rxnpath_dict()\n logger.debug(rxnpath)\n\n qc_output = prepare_opt_output(o_molsys, computer, rxnpath=rxnpath, error=error)\n\n del history.oHistory[:]\n o_molsys.clear()\n del op.Params\n del computer\n\n return qc_output\n\n except Exception as error:\n logger.critical(\"\\tA non-optimization-specific error has occurred.\\n\")\n logger.critical(\"\\tResetting all optimization options for potential queued jobs.\\n\")\n logger.exception(\"Error Type: \" + str(type(error)))\n logger.exception(\"Error caught:\" + str(error))\n\n rxnpath = None\n if len(history.oHistory.steps) >= 1:\n rxnpath = None\n if op.Params.opt_type == \"IRC\":\n rxnpath = IRCdata.history.rxnpath_dict()\n logger.debug(rxnpath)\n\n qc_output = prepare_opt_output(o_molsys, computer, rxnpath=rxnpath, error=error)\n\n del history.oHistory[:]\n o_molsys.clear()\n del op.Params\n del computer\n\n return qc_output",
"def solver_mll(X, y, C, S, alpha=0.1, max_iter=1000, tol=1e-4, positive=False):\n n_tasks, n_samples, n_features = X.shape\n lasso = Lasso(alpha=alpha, fit_intercept=False, positive=positive)\n lasso_p = Lasso(alpha=alpha / n_tasks, fit_intercept=False,\n positive=True)\n old_theta = C[:, None] * S\n\n for i in range(max_iter):\n W = X * C[None, None, :]\n for k in range(n_tasks):\n lasso.fit(W[k], y[k])\n S[:, k] = lasso.coef_\n Z = S.T[:, None, :] * X\n Z = Z.reshape(n_tasks * n_samples, n_features)\n lasso_p.fit(Z, y.flatten())\n C = lasso_p.coef_\n theta = C[:, None] * S\n dll = abs(theta - old_theta).max()\n dll /= max(abs(theta).max(), abs(old_theta).max(), 1.)\n old_theta = theta.copy()\n\n if dll < tol:\n break\n\n if i == max_iter - 1:\n warnings.warn('Objective did not converge.' +\n ' You might want' +\n ' to increase the number of iterations.' +\n ' Fitting data with very small alpha' +\n ' may cause precision problems.',\n ConvergenceWarning)\n return C, S, i",
"def potentialSolver5(self, w, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # Step 1: create *integer* array the same size as u \n x = np.zeros_like(self.phi,dtype=np.int)\n\n # Step 2: populate all non-boundary cells with running numbers from 1 to (n-2)^2\n x[1:-1,1:-1,1:-1] = np.arange(1,(self.ni-2)*(self.nj-2)*(self.nk-2)+1).reshape(self.ni-2,self.nj-2,self.nk-2)\n\n # Step 3: get indices of even (red) and odd (black) points\n ir, jr, kr = np.where((x>0) & (x%2 == 0)) # indices of red pts = indices of even numbers\n ib, jb, kb = np.where((x>0) & (x%2 == 1)) # indices of black pts = indices of odd numbers\n\n\n \n # solve potential\n for it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n \n \"\"\"\n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n # Red point update\n self.phi[ir,jr,kr] = (1-w)*self.phi[ir,jr,kr] + (1.0/6.0)*w*(self.phi[ir+1,jr,kr] + self.phi[ir-1,jr,kr] +\\\n self.phi[ir,jr+1,kr] + self.phi[ir,jr-1,kr] +\\\n self.phi[ir,jr,kr+1] + self.phi[ir,jr,kr-1] +\\\n (self.rho[ir,jr,kr]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n # Black point update\n self.phi[ib,jb,kb] = (1-w)*self.phi[ib,jb,kb] + (1.0/6.0)*w*(self.phi[ib+1,jb,kb] + self.phi[ib-1,jb,kb] +\\\n self.phi[ib,jb+1,kb] + self.phi[ib,jb-1,kb] +\\\n self.phi[ib,jb,kb+1] + self.phi[ib,jb,kb-1] +\\\n (self.rho[ib,jb,kb]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n -self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if 
(converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged",
"def potentialSolver3(self, w, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # Step 1: create *integer* array the same size as u \n x = np.zeros_like(self.phi,dtype=np.int)\n\n # Step 2: populate all non-boundary cells with running numbers from 1 to (n-2)^2\n x[1:-1,1:-1,1:-1] = np.arange(1,(self.ni-2)*(self.nj-2)*(self.nk-2)+1).reshape(self.ni-2,self.nj-2,self.nk-2)\n\n # Step 3: get indices of even (red) and odd (black) points\n ir, jr, kr = np.where((x>0) & (x%2 == 0)) # indices of red pts = indices of even numbers\n ib, jb, kb = np.where((x>0) & (x%2 == 1)) # indices of black pts = indices of odd numbers\n\n\n \n # solve potential\n for it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n \n \"\"\"\n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n # Red point update\n self.phi[ir,jr,kr] = (1-w)*self.phi[ir,jr,kr] + (1.0/6.0)*w*(self.phi[ir+1,jr,kr] + self.phi[ir-1,jr,kr] +\\\n self.phi[ir,jr+1,kr] + self.phi[ir,jr-1,kr] +\\\n self.phi[ir,jr,kr+1] + self.phi[ir,jr,kr-1] +\\\n (self.rho[ir,jr,kr]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n # Black point update\n self.phi[ib,jb,kb] = (1-w)*self.phi[ib,jb,kb] + (1.0/6.0)*w*(self.phi[ib+1,jb,kb] + self.phi[ib-1,jb,kb] +\\\n self.phi[ib,jb+1,kb] + self.phi[ib,jb-1,kb] +\\\n self.phi[ib,jb,kb+1] + self.phi[ib,jb,kb-1] +\\\n (self.rho[ib,jb,kb]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n -self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if 
(converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged",
"def potentialSolver3(self, w, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # Step 1: create *integer* array the same size as u \n x = np.zeros_like(self.phi,dtype=np.int)\n\n # Step 2: populate all non-boundary cells with running numbers from 1 to (n-2)^2\n x[1:-1,1:-1,1:-1] = np.arange(1,(self.ni-2)*(self.nj-2)*(self.nk-2)+1).reshape(self.ni-2,self.nj-2,self.nk-2)\n\n # Step 3: get indices of even (red) and odd (black) points\n ir, jr, kr = np.where((x>0) & (x%2 == 0)) # indices of red pts = indices of even numbers\n ib, jb, kb = np.where((x>0) & (x%2 == 1)) # indices of black pts = indices of odd numbers\n\n\n \n # solve potential\n for it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n \n \"\"\"\n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n # Red point update\n self.phi[ir,jr,kr] = (1-w)*self.phi[ir,jr,kr] + (1.0/6.0)*w*(self.phi[ir+1,jr,kr] + self.phi[ir-1,jr,kr] +\\\n self.phi[ir,jr+1,kr] + self.phi[ir,jr-1,kr] +\\\n self.phi[ir,jr,kr+1] + self.phi[ir,jr,kr-1] +\\\n (self.rho[ir,jr,kr]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n # Black point update\n self.phi[ib,jb,kb] = (1-w)*self.phi[ib,jb,kb] + (1.0/6.0)*w*(self.phi[ib+1,jb,kb] + self.phi[ib-1,jb,kb] +\\\n self.phi[ib,jb+1,kb] + self.phi[ib,jb-1,kb] +\\\n self.phi[ib,jb,kb+1] + self.phi[ib,jb,kb-1] +\\\n (self.rho[ib,jb,kb]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n -self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if 
(converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged",
"def sweptBlock(solver):\n #Create and fill shared array\n createCPUSharedArray(solver,numpy.zeros(solver.sharedShape,dtype=solver.dtype).nbytes)\n for i in range(solver.intermediate):\n solver.sharedArray[i,:,:,:] = solver.initialConditions[solver.globalBlock]\n #Create phase objects\n solver.Up = geometry.Geometry() \n solver.Down = geometry.Geometry() \n solver.Xb = geometry.Geometry()\n solver.Yb = geometry.Geometry()\n solver.Oct = geometry.Geometry() \n\n if solver.gpuBool:\n # Creating cuda device and context\n cuda.init()\n cuda_device = cuda.Device(solver.gpuRank)\n solver.cuda_context = cuda_device.make_context()\n setupGPUSwept(solver)\n #Setting up CPU\n setupCPUSwept(solver)\n solver.comm.Barrier() #Ensure all processes are",
"def nodal2D_steady_fixed_source(Dims,Lengths,BCs,D,Sigma,Q, tolerance=1.0e-12, phi_solution=0., LOUD=False, maxits=100):\n I = Dims[0]\n J = Dims[1]\n K = Dims[2]\n L = I*J*K\n Nx = Lengths[0]\n Ny = Lengths[1]\n Nz = Lengths[2]\n \n hx,hy,hz = np.array(Lengths)/np.array(Dims)\n ihx2,ihy2,ihz2 = (1.0/hx**2,1.0/hy**2,1.0/hz**2)\n\n if (type(phi_solution) != np.ndarray):\n phi_solution = np.zeros((2,I,J,5))\n phi_new = phi_solution.copy()\n iteration = 1\n converged = 0\n localBCs = np.ones((2,3))\n\n #reshape Q if necessary\n if Q.shape != (I,J,K,5):\n Q_new = np.zeros((I,J,K,5))\n Q_new[:,:,:,0] = Q[:,:,:]\n Q = Q_new\n\n #iterate over the x directions\n k=0\n while not(converged):\n \n #Solve for x direction\n d = 0 #solv direction\n tr_id = 1 #trans direction idx in array\n for j in range(J): #spatial loop over J coordinates\n for i in range(I): #spatial loop over X coordinates\n\n if not(i==0):\n phi_left = phi_solution[d,i-1,j,:]\n C = positive_current(phi_left,hx/2,hx,D[i-1,j,k])\n #print(\"i =\",i,\"Cr =\",C)\n localBCs[0,0:3] = [0.25,-D[i,j,k]/2,C]\n else:\n localBCs[0,:] = BCs[0,:].copy()\n localBCs[0,1] *= D[i,j,k]\n if not(i==(I-1)):\n phi_rt = phi_solution[d,i+1,j,:]\n C = negative_current(phi_rt,-hx/2,hx,D[i+1,j,k])\n #print(\"i =\",i,\"Cr =\",C)\n localBCs[1,0:3] = [.25,D[i,j,k]/2,C]\n else:\n localBCs[1,:] = BCs[1,:].copy()\n localBCs[1,1] *= D[i,j,k]\n \n #Compute transverse fluxes\n if i==0:\n nbr_ids = [i,i,i+1] #Assume constant along left edge\n elif i==(I-1):\n nbr_ids = [i-1,i,i] #assume constant along right edge\n else:\n nbr_ids = [i-1,i,i+1] #interior cell\n\n if not j==(J-1):\n top_phis = phi_solution[tr_id,nbr_ids,j,:]\n top_Ds = D[nbr_ids,j,k]\n Ltop_quad = transverse_leakage_dof(top_phis,hy/2.,hy,hx,top_Ds)\n else:\n top_phis = phi_solution[tr_id,nbr_ids,j,:]\n top_Ds = D[nbr_ids,j,k]\n Ltop_quad = transverse_leakage_dof(top_phis,hy/2.,hy,hx,top_Ds)\n #Ltop_quad = (0., 0, 0)\n\n if not j==0:\n bot_phis = phi_solution[tr_id,nbr_ids,j,:]\n bot_Ds = D[nbr_ids,j,k]\n Lbot_quad = transverse_leakage_dof(bot_phis,-hy/2.,hy,hx,bot_Ds)\n else:\n bot_phis = phi_solution[tr_id,nbr_ids,j,:]\n bot_Ds = D[nbr_ids,j,k]\n Lbot_quad = transverse_leakage_dof(bot_phis,-hy/2.,hy,hx,bot_Ds)\n #Lbot_quad = (0.,0,0)\n\n #Add leakages to the Q_local terms\n# print(\"\\n X Information for element: \",i,j)\n# print(\"\\nThe source is: \",Q[i,j,k,0])\n\n Q_local = np.array(Q[i,j,k,:])\n for dof in range(len(Ltop_quad)):\n Q_local[dof] -= 1/hy*(Ltop_quad[dof] - Lbot_quad[dof])\n\n# print(\"The transverse leakage magnitude is: \",-1./hy*(Ltop_quad[0] - Lbot_quad[0]))\n# print(\"Total RHS: \", Q_local[0], Q_local[1])\n\n #Compute the new x fluxes\n phi_new[0,i,j,:] = single_node1GVacuum(D[i,j,k],Sigma[i,j,k],Q_local,hx,localBCs)\n phi,a1,a2,a3,a4 = phi_new[0,i,j,:]\n# print(\"The reaction magnitude: \", phi_new[0,i,j,0]*Sigma[i,j,k])\n# print(\"The current magnitude: \",1./hx*(current(phi_new[0,i,j,:],hx/2,hx,D[i,j,k]) - current(phi_new[0,i,j,:],-hx/2,hx,D[i,j,k])))\n# print(\"\")\n\n #print(i,\"incoming current on left =\", localBCs[0,2],positive_current(phi_new[i,:],-h/2,h,D[i]) )\n if 0*(i>0):\n print(i,\"outgoing current on left =\", negative_current(phi_new[0,i-1,j,:],hx/2,hx,D[i-1,j,k]),\n negative_current(phi_new[0,i,j,:],-hx/2,hx,D[i,j,k]) )\n if 0*(i<I-1):\n print(i,\"outgoing current on right =\", positive_current(phi_new[0,i+1,j,:],-hx/2,hx,D[i+1,j,k]),\n positive_current(phi_new[0,i,j,:],hx/2,hx,D[i,j,k]) )\n #print(i,\"incoming current on right =\", 
localBCs[1,2],negative_current(phi_new[i,:],h/2,h,D[i]) )\n #print(\"zone \",i,\" current in at right:\",localBCs[1,2],\" current out at right:\",current_left)\n\n \n #Solve for y direction\n d = 1 #solv direction\n tr_id = 0 #trans direction idx in array\n for j in range(J): #spatial loop over J coordinates\n for i in range(I): #spatial loop over X coordinates\n\n if not(j==0):\n phi_left = phi_solution[d,i,j-1,:]\n C = positive_current(phi_left,hy/2,hy,D[i,j-1,k])\n #print(\"i =\",i,\"Cr =\",C)\n localBCs[0,0:3] = [0.25,-D[i,j,k]/2,C]\n else:\n localBCs[0,:] = BCs[2,:].copy()\n localBCs[0,1] *= D[i,j,k]\n if not(j==(J-1)):\n phi_rt = phi_solution[d,i,j+1,:]\n C = negative_current(phi_rt,-hy/2,hy,D[i,j+1,k])\n #print(\"i =\",i,\"Cr =\",C)\n localBCs[1,0:3] = [.25,D[i,j,k]/2,C]\n else:\n localBCs[1,:] = BCs[3,:].copy()\n localBCs[1,1] *= D[i,j,k]\n \n #Compute transverse fluxes\n if j==0:\n nbr_ids = [j,j,j+1] #Assume constant along left edge\n elif j==(J-1):\n nbr_ids = [j-1,j,j] #assume constant along right edge\n else:\n nbr_ids = [j-1,j,j+1] #interior cell\n\n if not i==(I-1):\n rgt_phis = phi_solution[tr_id,i,nbr_ids,:]\n rgt_Ds = D[i,nbr_ids,k]\n Lrgt_quad = transverse_leakage_dof(rgt_phis,hx/2.,hx,hy,rgt_Ds)\n# print(\"Leakage right\",Lrgt_quad)\n# print(\"Just the right leakage\",current(phi_solution[0,i,j,:],hx/2.,hx,D[i,j,k]))\n# print(\"Right outflow, inflow\",positive_current(phi_solution[0,i,j,:],hx/2,hx,D[i,j,k]),\n# negative_current(phi_solution[0,i,j,:],hx/2,hx,D[i,j,k]))\n else:\n rgt_phis = phi_solution[tr_id,i,nbr_ids,:]\n rgt_Ds = D[i,nbr_ids,k]\n Lrgt_quad = transverse_leakage_dof(rgt_phis,hx/2.,hx,hy,rgt_Ds)\n# print(\"Leakage right\",Lrgt_quad)\n# print(\"Just the right leakage\",current(phi_solution[0,i,j,:],hx/2.,hx,D[i,j,k]))\n# print(\"Right outflow, inflow\",positive_current(phi_solution[0,i,j,:],hx/2,hx,D[i,j,k]),\n# negative_current(phi_solution[0,i,j,:],hx/2,hx,D[i,j,k]))\n\n if not i==0:\n lft_phis = phi_solution[tr_id,i,nbr_ids,:]\n lft_Ds = D[i,nbr_ids,k]\n Llft_quad = transverse_leakage_dof(lft_phis,-hx/2.,hx,hy,lft_Ds)\n else:\n lft_phis = phi_solution[tr_id,i,nbr_ids,:]\n lft_Ds = D[i,nbr_ids,k]\n Llft_quad = transverse_leakage_dof(lft_phis,-hx/2.,hx,hy,lft_Ds)\n #Llft_quad = (0.,0,0)\n\n #Add leakages to the Q_local terms\n Q_local = np.array(Q[i,j,k,:])\n# print(\"\\n Y Information for element: \",i,j)\n# print(\"\\nThe source is: \",Q[i,j,k,0])\n for dof in range(len(Lrgt_quad)):\n Q_local[dof] -= 1/hx*(Lrgt_quad[dof] - Llft_quad[dof])\n# print(\"The transverse leakage magnitude is: \",-1./hx*(Lrgt_quad[0] - Llft_quad[0]))\n# print(\"Total RHS: \", Q_local[0], Q_local[1])\n\n phi_new[1,i,j,:] = single_node1GVacuum(D[i,j,k],Sigma[i,j,k],Q_local,hy,localBCs)\n# print(\"The reaction magnitude: \", phi_new[1,i,j,0]*Sigma[i,j,k])\n# print(\"The current magnitude: \",1./hy*(current(phi_new[1,i,j,:],hy/2,hy,D[i,j,k]) - current(phi_new[1,i,j,:],-hy/2,hy,D[i,j,k])))\n# print(\"\")\n phi,a1,a2,a3,a4 = phi_new[1,i,j,:]\n #print(i,\"incoming current on left =\", localBCs[0,2],positive_current(phi_new[i,:],-h/2,h,D[i]) )\n if 0*(i>0):\n print(i,\"outgoing current on left =\", negative_current(phi_new[i-1,:],h/2,h,D[i]),negative_current(phi_new[i,:],-h/2,h,D[i]) )\n if 0*(i<I-1):\n print(i,\"outgoing current on right =\", positive_current(phi_new[i+1,:],-h/2,h,D[i]),positive_current(phi_new[i,:],h/2,h,D[i]) )\n #print(i,\"incoming current on right =\", localBCs[1,2],negative_current(phi_new[i,:],h/2,h,D[i]) )\n #print(\"zone \",i,\" current in at 
right:\",localBCs[1,2],\" current out at right:\",current_left)\n\n# print(\"X solution\", phi_new[0,:,:,0])\n# print(\"Y solution\", phi_new[1,:,:,0])\n\n #Compute total change in x and y\n relchange = np.linalg.norm( np.reshape(phi_new-phi_solution, 5*I*J*K*2))/np.linalg.norm( np.reshape(phi_new, 5*I*J*K*2))\n reldiff = np.linalg.norm( np.reshape(phi_new[0,:,:,0] - phi_new[1,:,:,0], I*J*K)/np.linalg.norm( np.reshape(phi_new[0,:,:,0],I*J*K)) )\n converged = (relchange < tolerance) or (iteration >= maxits)\n if (LOUD):\n print(\"Iteration\",iteration,\": relative change total =\",relchange,\"relative difference X Y\",reldiff)\n iteration += 1 \n phi_solution = phi_new.copy()\n\n\n x = np.linspace(hx*.5,Nx-hx*.5,I)\n y = np.linspace(hy*.5,Ny-hy*.5,J)\n z = np.linspace(hz*.5,Nz-hz*.5,K)\n return x,y,z,phi_solution[0,:,:,0].reshape(I,J,1)#+phi_solution[1,:,:,0].reshape(I,J,1)))",
"def solve_driv(v, ene, s, n, h):\n\n xs = np.array([(k+1)*h for k in range(n)])\n h2 = h*h\n k = np.sqrt(2.0*ene)\n \n vs = [v(x)-ene for x in xs]\n\n mat = laplacian_mat(n) -2.0 * h2 * scipy.sparse.diags(vs, 0) + bc_outgoing_mat(n, h, k)\n vec = np.array([-2.0*h*h*s(x) for x in xs])\n\n ys = scipy.sparse.linalg.spsolve(mat, vec)\n return (xs, ys)",
"def solve(self):\n dim = self.puzzle.dimension\n\n # initial loop\n for value, (row, col) in self.puzzle:\n if value:\n self.clear_row(row, value)\n self.clear_col(col, value)\n self.clear_subgrid(row, col, value)\n self.updates.add((value, (row, col)))\n for ps in self.possibilities:\n ps.discard((row, col))\n\n while self.updates:\n while self.updates:\n # while self.updates:\n value, (row, col) = self.updates.pop()\n for i in range(1, dim + 1):\n self.check_row(i, value)\n self.check_col(i, value)\n for i in range(2, 8, 3):\n self.check_subgrid(row, i, value)\n self.check_subgrid(i, col, value)\n\n for value, (row, col) in self.puzzle:\n if not value:\n self.check_cell(row, col)\n\n # for value in range(1, dim + 1):\n # for row in [2, 5, 8]:\n # for col in [2, 5, 8]:\n # self.check_subgrid(row, col, value)",
"def solve(self, solver):\n solver.solve()",
"def actualSolve(self, lp):\n\t\traise NotImplementedError",
"def solve(self):\n # Use a trivial tour (1-2-3-...-N-1) to set the global upper bound.\n tour = list(range(self._N))\n upper_bound = sum([self._G[i][(i + 1) % self._N] for i in range(self._N)])\n trace = []\n\n # Start from a configuration with a single vertex.\n frontier = [BranchAndBoundConfiguration(self._G, self._N, [0], LOWER_BOUND_METHOD)]\n\n # Set the start time.\n start_time = time.time()\n\n # Branch and bound until the frontier set is empty or the time has expired.\n while frontier and (time.time() - start_time) < self._cutoff_time:\n # Fetch the most promising configuration.\n config = heappop(frontier)\n\n # Expand configuration by appending a vertex to the path.\n for v in range(self._N):\n try:\n expanded_config = config.expand(v)\n except ValueError:\n # Expanded configuration is not valid.\n continue\n if expanded_config.is_solution():\n # Update the global upper bound, if needed.\n this_solution = expanded_config.get_cycle_cost()\n if this_solution < upper_bound:\n # Log it.\n trace.append((time.time() - start_time, this_solution))\n # Update the best solution.\n upper_bound = this_solution\n tour = list(expanded_config.get_path())\n elif expanded_config.get_lower_bound() < upper_bound:\n # Add to the frontier set.\n heappush(frontier, expanded_config)\n return (upper_bound, [self._index_to_id[v] for v in tour], trace)"
] |
[
"0.6437674",
"0.63575566",
"0.63160455",
"0.6253777",
"0.6242397",
"0.61891377",
"0.6080901",
"0.6075275",
"0.60319287",
"0.6008029",
"0.5985438",
"0.5956081",
"0.5951685",
"0.5910511",
"0.5889266",
"0.588267",
"0.5834767",
"0.58335656",
"0.58294004",
"0.5818063",
"0.5789487",
"0.5777485",
"0.5777485",
"0.57704216",
"0.57280904",
"0.571287",
"0.57128173",
"0.57099754",
"0.57089484",
"0.5702032"
] |
0.66944
|
0
|
The argument p is assumed to be some permutation of 0, 1, ..., len(p)-1. Returns an array s, where s[i] gives the index of i in p.
|
import numpy as np

def invert_permutation(p):
    # p is a permutation of 0..len(p)-1; setting s[p[i]] = i makes s[i] the index of i in p
    s = np.empty_like(p)
    s[p] = np.arange(p.size)
    return s
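A minimal usage sketch (added for illustration, not part of the original entry; the input array is a hypothetical example):

import numpy as np

p = np.array([2, 0, 3, 1])   # a permutation of 0..3
s = invert_permutation(p)
print(s)                     # [1 3 0 2]; s[i] is the position of i in p
assert np.array_equal(p[s], np.arange(p.size))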
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def perm_invert(p):\n q = [None] * len(p)\n for i, j in enumerate(p):\n q[j] = i\n return q",
"def sample_from(self, p):\n return np.searchsorted(np.cumsum(p), np.random.rand())",
"def permute_2d(m, p):\r\n return m[p][:, p]\r\n # unused below\r\n m_t = transpose(m)\r\n r_t = take(m_t, p, axis=0)\r\n return take(transpose(r_t), p, axis=0)",
"def perm_conjugate(p, s):\n q = [None] * len(p)\n for i in range(len(p)):\n q[s[i]] = s[p[i]]\n return q",
"def gen_rand_index(p, n):\n # TODO Check args here\n \n # TODO: check each value of inverse distribution is\n # different\n invcdf = N.cumsum(p)\n uni = rand(n)\n index = N.zeros(n, dtype=int)\n\n # This one should be a bit faster\n for k in range(len(p)-1, 0, -1):\n blop = N.where(N.logical_and(invcdf[k-1] <= uni, \n uni < invcdf[k]))\n index[blop] = k\n \n return index",
"def sample(a, p):\n if (len(a) != len(p)):\n raise Exception('a != p')\n p = np.array(p)\n p = p / p.sum()\n r = random.random()\n n = len(a)\n total = 0 # range: [0,1]\n for i in xrange(n):\n total += p[i]\n if total > r:\n return a[i]\n return a[i]",
"def pseudorandom(n, p, key):\n import numpy as np\n p = list(p)\n cp = np.cumsum([0] + p)\n assert np.allclose(1, cp[-1])\n assert len(p) < 256\n\n x = np.random.RandomState(key).random_sample(n)\n out = np.empty(n, dtype='i1')\n\n for i, (low, high) in enumerate(zip(cp[:-1], cp[1:])):\n out[(x >= low) & (x < high)] = i\n return out",
"def pflip(P):\n if len(P) == 1:\n return 0\n\n P /= sum(P)\n\n assert math.fabs(1.0-sum(P)) < 10.0**(-10.0)\n\n p_minus = 0\n r = np.random.rand()\n for i in range(len(P)):\n P[i] += p_minus\n p_minus = P[i]\n if r < p_minus:\n return i\n\n raise IndexError(\"pflip:failed to find index\")",
"def lift_perm(p: Dict[int, int]) -> np.ndarray:\n n = len(p)\n pm = np.zeros((1 << n, 1 << n), dtype=complex)\n for i in range(1 << n):\n j = 0\n mask = 1 << n\n for q in range(n):\n mask >>= 1\n if (i & mask) != 0:\n j |= 1 << (n - 1 - p[q])\n pm[j][i] = 1\n return pm",
"def index_to_feature(p, dims):\n feature = []\n for dim in dims:\n feature.append(p % dim)\n p //= dim\n return feature",
"def permute(seq, permutation):\n return [seq[i] for i in permutation]",
"def naive(p, t):\n\toccurence = []\n\tfor i in range(len(t)-len(p) + 1):\n\t\tmatch = True\n\t\tfor j in range(len(p)):\n\t\t\tif not p[j] == t[i+j]:\n\t\t\t\tmatch = False\n\t\t\t\tbreak\n\t\tif match:\n\t\t\toccurence.append(i)\n\treturn occurence",
"def decoder(permutation):\n depermutation = []\n for x in range (0, len (permutation)):\n depermutation.append (permutation.index(x))\n return depermutation",
"def point_location(tri, p): \n simplex_index = tri.find_simplex(p)\n bc = []\n for id_, point in zip(simplex_index, p):\n # Calculate the two first barycentric coordinates for the relevant\n # simplex\n b = tri.transform[id_, :2].dot(point-tri.transform[id_, 2])\n bc.append(np.c_[np.atleast_2d(b), 1-b.sum()])\n # Create the full array and squeeze the shit out of it\n bc = np.array(bc).squeeze()\n return simplex_index, bc",
"def naive(p, t):\n occurences = []\n for i in range(len(t) - len(p) + 1):\n match = True\n for j in range(len(p)):\n if t[i + j] != p[j]:\n match = False\n break\n if match:\n occurences.append(i)\n return occurences",
"def discrete_rv(p):\n u = np.random.uniform()\n cdf = np.cumsum(p)\n j = np.searchsorted(cdf, u)\n return j",
"def _permutation_to_vertex(self, p):\n return (\n tuple(p._labels[0]),tuple(p._labels[1]),\n tuple(p._twin[0]),tuple(p._twin[1]))",
"def values(self, ps):\n ps = np.asarray(ps)\n if np.any(ps < 0) or np.any(ps > 1):\n raise ValueError('Probability p must be in range [0, 1]')\n\n index = np.searchsorted(self.ps, ps, side='left')\n return self.xs[index]",
"def pentagonal_index(P):\n return (1 + sqrt(1 + 24 * P)) / 6",
"def cycles(p: List[int]) -> List[Set[int]]:\n validate_permutation(p)\n\n todo = list(range(len(p)))\n cycles = []\n\n while todo:\n start = todo.pop(0)\n\n cycle = (start,)\n position = p[start]\n\n while position != start:\n todo.remove(position)\n cycle += (position, )\n position = p[position]\n\n cycles.append(cycle)\n\n return cycles",
"def permute(self, arr):\n\n return arr[self.permutation_idxs]",
"def permutation(s):\n if len(s) == 1:\n return [s]\n result = []\n first = s[0]\n ss = s[1:]\n pers = permutation(ss)\n for p in pers:\n for i in range(0,len(p)):\n result.append(p[:i]+first+p[i:])\n return result",
"def _permutation_to_vertex(self, p):\n return (tuple(p._labels[0]),tuple(p._labels[1]),\n tuple(p._twin[0]), tuple(p._twin[1]),\n tuple(p._flips[0]), tuple(p._flips[1]))",
"def sample(x, p=None):\n s = np.random.random_sample()\n if p is None:\n return x[int(s*len(x))]\n else:\n p = np.cumsum(p)\n p = p / float(p[-1])\n return x[sum(s >= p)]",
"def _P(m):\n P = np.zeros((m**2,m**2), dtype=np.int64)\n for i in range(1, m**2 + 1):\n j = 1 + m*((i - 1) % m) + (i - 1)//m\n P[i-1, j-1] = 1\n return P",
"def get_array_index_permutations(param):\n indices = list()\n\n try:\n for d in reversed(param.get(\"dimensions\")):\n i = list()\n for x in range(0, d.get(\"len\")):\n i.append(x)\n indices.append(i)\n\n array_dereferences = list(itertools.product(*indices))\n return array_dereferences\n\n except TypeError:\n return list()",
"def _get_sample(self, p: float) -> np.ndarray:\n return np.where(self.rand_array >= p, 0, 1)",
"def decode_from_P(P):\n N = P.shape[0]\n A = P.shape[1]\n \n X = np.arange(N)\n \n for i in range(N):\n max_val = -1e100\n for a in range(A):\n if P[i,a] > max_val:\n max_val = P[i,a]\n X[i] = a\n \n return X",
"def p2vertices(self, p):\n h = self.top\n verts = np.empty((self.nparams + 2, 2))\n verts[:, 0] = self._modelx\n verts[:, 1] = np.concatenate([[h], p, [h]])\n return verts",
"def perm_vs_hyp():\n\n return [\"P\",\"P\",\"P\",\"P\",\"P\"]"
] |
[
"0.6614275",
"0.5957587",
"0.58996975",
"0.5898854",
"0.58252364",
"0.574952",
"0.57436204",
"0.5726088",
"0.57004726",
"0.5664967",
"0.5657052",
"0.56268764",
"0.55960995",
"0.55443096",
"0.55430853",
"0.5538524",
"0.550933",
"0.55085063",
"0.5503721",
"0.5452929",
"0.543458",
"0.5420169",
"0.54064",
"0.54035187",
"0.54008234",
"0.53937685",
"0.5385929",
"0.5383989",
"0.53809196",
"0.53575355"
] |
0.7273526
|
0
|
Initialize a HOOMD device given the parse arguments.
|
def make_hoomd_device(args):
if args.device == 'CPU':
device = hoomd.device.CPU()
elif args.device == 'GPU':
device = hoomd.device.GPU()
else:
raise ValueError(f'Invalid device {args.device}.')
if not args.verbose:
device.notice_level = 0
return device
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def initialize(self):\n self.ha_url = self.args.get(\"ha_url\", None)\n self.use_current_brightness = self.args.get(\"use_current_brightness\", False)\n self.condition = self.args.get(\"condition\")\n self.lights = self.args[\"lights\"]\n self.listen_state(self.change_lights_color, self.args[\"media_player\"], attribute = self.args.get(\"photo_attribute\", \"entity_picture\"))",
"def init_argument_parser(modules, device=None):\n parser = ArgumentParser()\n if device:\n parser.add_argument('--help-device', action='store_true',\n help='Print help for arguments specific to device')\n modules.append('libregice')\n init_modules_args(device, parser, modules)\n return parser",
"def __init__(self, device_handle):\n\n self.device_handle = device_handle",
"def init(*, args: List[str]) -> None:\n logs.show_presentation()\n execute.parse_args(args=args)",
"def _Parse():\n prog = sys.argv[0]\n example_usage = ('Example:\\n' +\n ' python %s keyboard 00:11:22:33:44:55\\n' % prog +\n ' python %s mouse 00:11:22:33:44:55\\n'% prog)\n parser = argparse.ArgumentParser(\n description='Emulate a HID device.\\n' + example_usage,\n formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument('device',\n choices=['keyboard', 'mouse'],\n help='the device type to emulate')\n parser.add_argument('remote_host_address',\n help='the remote host address')\n parser.add_argument('-c', '--chars_to_send',\n default='echo hello world',\n help='characters to send to the remote host')\n args = parser.parse_args()\n\n if len(args.remote_host_address.replace(':', '')) != 12:\n print '\"%s\" is not a valid bluetooth address.' % args.remote_host_address\n exit(1)\n\n print ('Emulate a %s and connect to remote host at %s' %\n (args.device, args.remote_host_address))\n return args",
"def initialize():\n\n parser = build_arg_parser()\n par = parser.parse_known_args()[0]\n\n # Main arguments.\n set('run_mode', par.run_mode)\n set('input_files', par.image)\n\n # Sub-parser specific arguments.\n if par.run_mode == 'train':\n\n set('batch_size', par.batch_size)\n set('drop', par.drop)\n set('epochs', par.epochs)\n set('model', par.model)\n set('level', par.level)\n set('vfrac', par.vfrac)\n set('data_augm', par.data_augm)\n set('summary', par.summary)\n set('outdir', par.outdir)\n # Parameters associated with super-resolution. \n set('super_resolution', par.super_resolution)\n set('generator', par.generator)\n set('discriminator', par.discriminator)\n\n elif par.run_mode == 'predict':\n\n set('tile_edge', par.edge)\n set('model', par.model)\n set('save_conv2d_kernels', par.save_conv2d_kernels) \n set('save_conv2d_outputs', par.save_conv2d_outputs) \n set('colormap', par.colormap)\n # Parameters associated with super-resolution. \n set('super_resolution', par.super_resolution)\n set('generator', par.generator)\n\n elif par.run_mode == 'diagnose': \n \n set('model', par.model) \n \n else:\n \n pass",
"def parse_args():\n global default_device\n\n parser = argparse.ArgumentParser(description = 'Initialize OATH token',\n add_help=True,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n parser.add_argument('-D', '--device',\n dest='device',\n default=default_device,\n required=False,\n help='YubiHSM device',\n )\n parser.add_argument('--debug',\n dest='debug',\n action='store_true', default=False,\n help='Enable debug operation',\n )\n\n parser.add_argument('--key-handle',\n dest='key_handle',\n required=True,\n help='Key handle to create AEAD',\n metavar='HANDLE',\n )\n parser.add_argument('--oath-k',\n dest='oath_k',\n required=False,\n help='The secret key of the token, hex encoded',\n metavar='HEXSTR',\n )\n\n args = parser.parse_args()\n return args",
"def init_arg_parser(args):\n arg_parser = argparse.ArgumentParser(\n description='Control node for the InMoov robot head. Receives movement commands and calculates trajectory.')\n\n arg_parser.add_argument('-p', '--showplot',\n action='store_true',\n dest='showplot',\n help='Triggers display of plot for calculated trajectory')\n\n arg_parser.add_argument('--version', action='version', version='%(prog)s 0.1a')\n\n args = arg_parser.parse_args(args)\n\n return args",
"def __init__(self, device_dict):\n diff = set(device_dict.keys()) - set(YAMLKeyword.__dict__.keys())\n if len(diff) > 0:\n six.print_('Wrong key detected:')\n six.print_(diff)\n raise KeyError(str(diff))\n self.__dict__.update(device_dict)\n if self.system == SystemType.android:\n pass\n elif self.system == SystemType.arm_linux:\n try:\n sh.ssh('-q', '%s@%s' % (self.username, self.address),\n 'exit')\n except sh.ErrorReturnCode as e:\n six.print_('device connect failed, '\n 'please check your authentication',\n file=sys.stderr)\n raise e",
"def __init__(self,device=None,port=0):\n self.device= Service.initDevice(device)\n self.adbCmd= r'adb -s %s '%self.device\n self.port = port\n if self.port == 0:\n self.port = utils.free_port()",
"def __init__(self, device):\n self._unique_id = device\n self._device = AehW4a1(device)\n self._fan_modes = FAN_MODES\n self._swing_modes = SWING_MODES\n self._preset_modes = PRESET_MODES\n self._attr_available = False\n self._on = None\n self._current_temperature = None\n self._target_temperature = None\n self._attr_hvac_mode = None\n self._fan_mode = None\n self._swing_mode = None\n self._preset_mode = None\n self._previous_state = None",
"def __init__(self, **device_identifiers):\n\n # Connect to the first available device.\n try:\n self.device = usb.core.find(**device_identifiers)\n except usb.core.USBError as e:\n # On some platforms, providing identifiers that don't match with any\n # real device produces a USBError/Pipe Error. We'll convert it into a\n # DeviceNotFoundError.\n if e.errno == LIBUSB_PIPE_ERROR:\n raise DeviceNotFoundError()\n else:\n raise e\n\n # If we couldn't find a board, bail out early.\n if self.device is None:\n raise DeviceNotFoundError()\n\n # For now, supported boards provide a single configuration, so we\n # can accept the first configuration provided.\n self.device.set_configuration()\n\n # Run the parent initialization.\n super(USBCommsBackend, self).__init__(**device_identifiers)",
"def __init__(self):\n self.hw = dev_hwinfo.device()\n self.ethKey=\"Ethernet\"\n self.ethAllInterfaceName=[]\n dir_path = os.path.dirname(os.path.realpath(__file__))\n self.myDefine = init_define.main()\n self.mPlatform=self.hw.getPlatform()",
"def init(self):\r\n self._parse_options(self._force_args)\r\n self._maybe_daemonize()\r\n self._setup_modules()\r\n self._state = self.INITIALIZED",
"def __init__(self, name, host):\n\n self._device = OppleLightDevice(host)\n\n self._name = name\n self._is_on = None\n self._brightness = None\n self._color_temp = None",
"def init(self, *args):\n return self.cmd('init', *args)",
"def __init__(self, args, data_path, data_dir, device, log, x_shape):\r\n self._args = args\r\n self._data_path = data_path\r\n self._data_dir = data_dir\r\n self._device = device\r\n self._x_shape = x_shape\r\n self._log = log",
"def __init__(self, temperature=None, daba=True, *args, **kwargs):\n super(DATA, self).__init__(*args, **kwargs)\n self.daba = daba\n self.temperature = temperature\n self.argv = None\n self.config = None",
"def initialize(self,*args,**kwargs):\n self.__instrumentID = c_uint32(0) \n self.__numInstruments = c_uint32()\n self.__nbrOfChannels = c_uint32()\n self.__nbrADCBits = c_uint32()\n self.__temperature = c_int32()\n self.__time_us = c_double()\n\n self.loadDLLs(**kwargs) # Load the different DLLs or DLL based modules\n self.reinit() # init or reinit the board\n self.createDictAndGlobals() # create dictionaries and global variables\n self.nbrOfChannels=int(self.__nbrOfChannels.value) # duplicate self.nbrOfChannels in a Python type variable \n self.getInitialConfig()",
"def init_device(platform=\"Android\", uuid=None, **kwargs):\n cls = import_device_cls(platform)\n dev = cls(uuid, **kwargs)\n # Add device instance in G and set as current device.\n G.add_device(dev)\n return dev",
"def set_parameters(self, args):\n self.args = args\n\n if args.testing:\n self.delay_close()\n\n if args.source == \"simulation\":\n log.info(\"Create simulated spectra device\")\n self.dev = simulation.SimulatedSpectraDevice()\n\n elif args.source == \"sled\":\n log.info(\"Create single sled cobra\")\n self.dev = simulation.SimulatedCobraSLED()\n\n elif args.source == \"cobra\":\n log.info(\"Create DALSA cobra device\")\n #self.dev = devices.DalsaCobraDevice()\n self.dev = DALSA.Cobra()\n\n elif args.source == \"opto\":\n log.info(\"Create OPTO sensor cobra device\")\n self.dev = DALSA.OPTOCobra()\n\n elif args.source == \"basler\":\n log.info(\"Create DALSA basler device\")\n #self.dev = devices.DalsaBaslerDevice()\n self.dev = DALSA.BaslerSprint4K()\n\n self.dev.setup_pipe()\n self.setup_pipe_timer()",
"def initialise(self, args, environ):",
"def initialize(self, args):\n\t\tpass",
"def _init_system(*args):\n __set_time_elements(args[0], args[1])\n __set_control_elements(args[0], args[2], args[3])\n __set_sensor_elements(args[0], args[4], args[5], args[6], args[7])",
"def __init__(self, root, io):\n parts.hand.Hand.__init__(self, root=root, io=io)\n\n dxl_motors = OrderedDict({\n name: dict(conf)\n for name, conf in self.dxl_motors.items()\n })\n\n self.attach_dxl_motors(dxl_motors)\n\n \"\"\"\n self._load_sensor = self.io.find_module('force_gripper')\n self._load_sensor.offset = 4\n self._load_sensor.scale = 10000\n \"\"\"",
"def __init__(self, hass: HomeAssistantType, entry: ConfigEntry) -> None:\n self.hass = hass\n self.entry = entry\n self.entry_id = entry.entry_id\n self.unique_id = entry.unique_id\n self._host = entry.data.get(CONF_HOST)\n self._port = entry.data.get(CONF_PORT)\n self._ssl = entry.data.get(CONF_SSL)\n self._username = entry.data.get(CONF_USERNAME)\n self._password = entry.data[CONF_PASSWORD]\n\n self._info = None\n self.model = None\n self.device_name = None\n self.firmware_version = None\n\n self._method_version = 1\n consider_home_int = entry.options.get(\n CONF_CONSIDER_HOME, DEFAULT_CONSIDER_HOME.total_seconds()\n )\n self._consider_home = timedelta(seconds=consider_home_int)\n\n self._api: Netgear = None\n self._attrs = {}\n\n self.devices = {}",
"def __init__(self):\n self.server_name = 'Binary Light Device'\n self.device = None",
"def init(self, args, **kwargs):\n # Retrieve configuration file and directory or set defaults.\n conf_file = os.path.expanduser(\n args._get('conf_file', kwargs.pop('conf_file', DEFAULT_CONF_FILE)))\n conf_dir = os.path.expanduser(\n args._get('conf_dir', kwargs.pop('conf_dir', DEFAULT_CONF_DIR)))\n commands = [value for (arg, value) in sorted(args) if arg.startswith('command')]\n\n # Load main configuration file.\n if os.path.exists(conf_file):\n self.load_cmd_file(conf_file)\n\n # Load intermediary configuration files.\n if os.path.isdir(conf_dir):\n self.load_dir(conf_dir, clg.config, commands)",
"def __init__(self, manager, device_config, log_file_name, log_directory):\n super().__init__(\n manager,\n device_config,\n log_file_name=log_file_name,\n log_directory=log_directory)\n self._commands.update(COMMANDS)\n self._regexes.update(REGEXES)\n self._timeouts.update(TIMEOUTS)\n self._serial_port = None",
"def init_parser(parser):\n parser.add_argument(\"--device\", \"-d\",\n help=\"Device to record video from\",\n type=types.connected_android_device,\n default=defaults.connected_android_device()).completer = completion.android_devices\n parser.add_argument(\"--bitrate\", \"-b\",\n help=\"Video bit rate, by default 8000000 (6Mbps)\",\n type=int,\n default=8000000)\n parser.add_argument(\"--timeout\", \"-t\",\n help=\"Maximum video duration, seconds (shouldn't exceed 180)\",\n type=types.adb_video_limit,\n default=180)\n parser.add_argument(\"--compress\", \"-c\",\n help=\"Compress video after recording or not, by default True\",\n type=bool,\n default=True).completer = completion.truefalse"
] |
[
"0.62113965",
"0.608058",
"0.59696805",
"0.5943475",
"0.59068257",
"0.583213",
"0.5805206",
"0.5683287",
"0.5648386",
"0.56437355",
"0.56389016",
"0.56138045",
"0.5604826",
"0.5592298",
"0.5573388",
"0.55678135",
"0.55641365",
"0.5561535",
"0.55565435",
"0.5537574",
"0.55369824",
"0.5531317",
"0.55162776",
"0.5510179",
"0.548618",
"0.5464697",
"0.54606086",
"0.5439942",
"0.54372567",
"0.54372376"
] |
0.6631796
|
0
|
Execute the benchmark and report the performance.
|
def execute(self):
print_verbose_messages = (self.verbose
and self.device.communicator.rank == 0)
# Ensure that all ops are attached (needed for is_tuning_complete).
self.run(0)
if print_verbose_messages:
print(f'Running {type(self).__name__} benchmark')
if print_verbose_messages:
print(f'.. warming up for {self.warmup_steps} steps')
self.run(self.warmup_steps)
if (isinstance(self.device, hoomd.device.GPU)
and hasattr(self.sim.operations, 'is_tuning_complete')):
while not self.sim.operations.is_tuning_complete:
if print_verbose_messages:
print('.. autotuning GPU kernel parameters for '
f'{self.warmup_steps} steps')
self.run(self.warmup_steps)
if print_verbose_messages:
print(f'.. running for {self.benchmark_steps} steps '
f'{self.repeat} time(s)')
# benchmark
performance = []
if isinstance(self.device, hoomd.device.GPU):
with self.device.enable_profiling():
for i in range(self.repeat):
self.run(self.benchmark_steps)
performance.append(self.get_performance())
if print_verbose_messages:
print(f'.. {performance[-1]} {self.units}')
else:
for i in range(self.repeat):
self.run(self.benchmark_steps)
performance.append(self.get_performance())
if print_verbose_messages:
print(f'.. {performance[-1]} {self.units}')
return performance
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def benchmark(self):\n logger.info(self.benchmark.__doc__)\n return self.run(self.benchmark_profile())",
"def _run_benchmark(self, params):\n logging.info('Running benchmark [%s]', self._get_name())\n params = benchmark_cnn.setup(params)\n bench = benchmark_cnn.BenchmarkCNN(params)\n bench.print_info()\n stats = bench.run()\n extras = {}\n extras['examples_per_sec'] = stats.get('images_per_sec')\n if 'last_average_loss' in stats:\n extras['last_average_loss'] = stats['last_average_loss']\n if 'top_1_accuracy' in stats:\n extras['top_1_accuracy'] = stats['top_1_accuracy']\n if 'top_5_accuracy' in stats:\n extras['top_5_accuracy'] = stats['top_5_accuracy']\n self.report_benchmark(\n iters=stats.get('num_steps'),\n wall_time=stats.get('average_wall_time'),\n extras=extras)",
"def main(cls):\n parser = cls.make_argument_parser()\n args = parser.parse_args()\n args.device = make_hoomd_device(args)\n benchmark = cls(**vars(args))\n performance = benchmark.execute()\n\n if args.device.communicator.rank == 0:\n print(f'{numpy.mean(performance)}')",
"def Run(benchmark_spec):\n vms = benchmark_spec.vms\n master_vm = vms[0]\n run_command = 'cd %s && %s ./%s' % (hpcg.HPCG_DIR,\n _GetEnvironmentVars(benchmark_spec),\n RUN_SCRIPT)\n output, _ = master_vm.RobustRemoteCommand(run_command)\n return _MakeSamplesFromOutput(benchmark_spec, output)",
"def run_benchmark(curl, benchmark, test_config = TestConfig()):\n\n warmup_runs = benchmark.warmup_runs\n benchmark_runs = benchmark.benchmark_runs\n message = '' #Message is name of benchmark... print it?\n\n if (warmup_runs <= 0):\n raise Exception(\"Invalid number of warmup runs, must be > 0 :\" + warmup_runs)\n if (benchmark_runs <= 0):\n raise Exception(\"Invalid number of benchmark runs, must be > 0 :\" + benchmark_runs)\n\n #Initialize variables to store output\n output = BenchmarkResult()\n output.name = benchmark.name\n output.group = benchmark.group\n metricnames = list(benchmark.metrics)\n metricvalues = [METRICS[name] for name in metricnames] # Metric variable for curl, to avoid hash lookup for every metric name\n results = [list() for x in xrange(0, len(metricnames))] # Initialize arrays to store results for each metric\n\n curl.setopt(pycurl.WRITEFUNCTION, lambda x: None) #Do not store actual response body at all.\n\n #Benchmark warm-up to allow for caching, JIT compiling, on client\n logging.info('Warmup: ' + message + ' started')\n for x in xrange(0, warmup_runs):\n if benchmark.method == u'POST' or benchmark.method == u'PUT':\n curl.setopt(curl.READFUNCTION, StringIO.StringIO(benchmark.body).read)\n curl.perform()\n logging.info('Warmup: ' + message + ' finished')\n\n logging.info('Benchmark: ' + message + ' starting')\n\n for x in xrange(0, benchmark_runs): # Run the actual benchmarks\n if benchmark.method == u'POST' or benchmark.method == u'PUT':\n curl.setopt(curl.READFUNCTION, StringIO.StringIO(benchmark.body).read)\n\n try: # Run the curl call, if it errors, then add to failure counts for benchmark\n curl.perform()\n except Exception:\n output.failures = output.failures + 1\n continue # Skip metrics collection\n\n # Get all metrics values for this run, and store to metric lists\n for i in xrange(0, len(metricnames)):\n results[i].append( curl.getinfo(metricvalues[i]) )\n\n logging.info('Benchmark: ' + message + ' ending')\n\n temp_results = dict()\n for i in xrange(0, len(metricnames)):\n temp_results[metricnames[i]] = results[i]\n output.results = temp_results\n\n curl.close()\n return analyze_benchmark_results(output, benchmark)",
"def main(ctx: click.Context):\n click.secho(\"MySQL Benchmark\", bold=True)\n results = []\n with click.progressbar(range(ctx.obj[\"count\"])) as bar:\n for number in bar:\n response = requests.get(url=f'{ctx.obj[\"hostname\"]}/api/mysql.php')\n if response.status_code != 200:\n raise click.ClickException(\n f'{ctx.obj[\"hostname\"]}/api/mysql.php Not Found!'\n )\n\n response = requests.get(url=f'{ctx.obj[\"hostname\"]}/api/mysql.php')\n response.raise_for_status()\n results.append(\n BenchmarkResult(\n timestamp=time.time(), number=number, data=response.json()\n )\n )\n time.sleep(ctx.obj[\"sleep\"])\n\n insert_timings = get_timings(results, \"insert\")\n insert_single_transaction_timings = get_timings(\n results, \"insertSingleTransaction\"\n )\n result = {\n \"results\": results,\n \"timings\": {\n \"insert\": calculate_timing_stats(insert_timings),\n \"insert_single_transaction\": calculate_timing_stats(\n insert_single_transaction_timings\n ),\n },\n }\n table = render_table(result)\n click.echo(table)",
"def benchmark(options):\n # Prepare experiments\n with open(options['<benchmark>']) as f:\n benchmark_config = json.loads(f.read())\n generate_agent_configs(benchmark_config)\n experiments = product(benchmark_config['environments'], benchmark_config['agents'], [options])\n\n # Run evaluations\n with Pool(processes=int(options['--processes'])) as pool:\n results = pool.starmap(evaluate, experiments)\n\n # Clean temporary config files\n generate_agent_configs(benchmark_config, clean=True)\n\n # Write evaluations summary\n benchmark_filename = os.path.join(Evaluation.OUTPUT_FOLDER, '{}_{}.{}.json'.format(\n BENCHMARK_FILE, datetime.datetime.now().strftime('%Y%m%d-%H%M%S'), os.getpid()))\n with open(benchmark_filename, 'w') as f:\n json.dump(results, f, sort_keys=True, indent=4)\n gym.logger.info('Benchmark done. Summary written in: {}'.format(benchmark_filename))",
"def run_report(self) -> None:\n t1 = self.t1 or time.time()\n\n dt = t1 - self.t0\n\n if dt and self.max_tasks:\n speed = len(self.statistics) / dt / self.max_tasks\n else:\n speed = 0\n\n LOGGER.info('CRAWLER STATISTICS REPORT')\n\n show = list(self.statistics)\n show.sort(key=lambda stat: str(stat.url))\n\n for stat in show:\n self.log_url_metadata(stat)\n\n LOGGER.info(\n f'Completed parsing {len(self.statistics)} urls in {dt} secs; (max_tasks={self.max_tasks}) ({speed} urls per second per task)', # pylint: disable=C0301 # noqa: E501\n )\n\n LOGGER.info(f'Remaining: {self.queue.qsize()}')\n LOGGER.info(f'Total Statistics: {len(self.statistics)}')\n LOGGER.info(f'Datetime: {time.ctime()} local time')",
"def Run(benchmark_spec):\n vms = benchmark_spec.vms\n results = []\n\n logging.info('Iperf Results:')\n\n # Send traffic in both directions\n for sending_vm, receiving_vm in vms, reversed(vms):\n # Send using external IP addresses\n if vm_util.ShouldRunOnExternalIpAddress():\n results.append(_RunIperf(sending_vm,\n receiving_vm,\n receiving_vm.ip_address,\n 'external'))\n\n # Send using internal IP addresses\n if vm_util.ShouldRunOnInternalIpAddress(sending_vm,\n receiving_vm):\n results.append(_RunIperf(sending_vm,\n receiving_vm,\n receiving_vm.internal_ip,\n 'internal'))\n\n return results",
"def run_benchmark(self, test_config, instance, copy=0):\n # Timestamp and other values added for reporting\n result_dir = self.results_directory(test_config)\n test_config['timestamp'] = int(time.time())\n test_config['workspace'] = self.workspace\n cmd = self._cmd_builder(test_config)\n test_config['cmd'] = cmd\n total_batches = test_config['total_batches']\n\n test_home = os.path.join(self.bench_home, test_config['cmd_path'])\n\n # Write config to results folder\n config_file_out = os.path.join(result_dir, 'config.yaml')\n config_out = open(config_file_out, 'w')\n config_out.write(yaml.dump(test_config))\n config_out.close()\n\n # TODO(tobyboyd@): No longer distributed remove threads.\n worker_threads = []\n i = 0\n cmd = 'cd {}; {}'.format(test_home, cmd)\n print('[{}] worker | Run benchmark({}):{}'.format(\n copy, test_config['test_id'], cmd))\n stdout_file = os.path.join(result_dir, 'worker_%d_stdout.log' % i)\n stderr_file = os.path.join(result_dir, 'worker_%d_stderr.log' % i)\n t = instance.ExecuteCommandInThread(\n cmd, stdout_file, stderr_file, print_error=True)\n worker_threads.append(t)\n\n # Wait for log file to appear\n wait_time = 0\n while t.is_alive() and not os.path.isfile(stdout_file):\n print('Waiting for log file. Waited for {} seconds.'.format(wait_time))\n time.sleep(2)\n wait_time += 2\n\n # TODO(tobyboyd@) fix fragile check for batch to stop on.\n # Example: Epoch: [0][130/40037] Time 0.397\n batch_killer = '{}/'.format(total_batches)\n while t.is_alive():\n with open(stdout_file, 'r') as log:\n for line in log:\n if batch_killer in line:\n print('{} batches complete. Kill Thread.'.format(batch_killer))\n instance.kill_processes()\n break\n time.sleep(5)\n\n for t in worker_threads:\n t.join()\n\n return result_dir",
"def run_tests(self):\n with self.report.timer.record(\"run\"):\n self.result.report.extend(self._run_tests())",
"def run_benchmark():\n import argparse\n parser = argparse.ArgumentParser(description='Benchmark alchemically modified system against unmodified system.')\n parser.add_argument('--platform', dest='platform_name', action='store', default=None, help='platform name to benchmark (default: None)')\n options = parser.parse_args()\n\n from sams.tests import testsystems\n for testsystem_name in ['AblImatinibExplicitAlchemical']:\n cls = getattr(testsystems, testsystem_name)\n testsystem = cls()\n factory_args = { 'ligand_atoms' : testsystem.alchemical_atoms, 'receptor_atoms' : range(0,4266) }\n benchmark(testsystem.system, testsystem.positions, platform_name=options.platform_name, nsteps=5000, timestep=1.0*unit.femtoseconds, factory_args=factory_args)",
"def test_benchmark(self):\n\n proc = subprocess.Popen([\n sys.executable,\n benchmark.__file__,\n self.live_server_ws_url,\n ])\n for _ in range(0, 90, 5):\n time.sleep(5)\n if proc.returncode:\n break\n else:\n proc.terminate()\n proc.wait()\n assert proc.returncode == 0",
"def main():\n logging.basicConfig(level=\"INFO\")\n assert len(sys.argv) == 2, \"Exactly one positional argument (path to the raw dataset) is \"\\\n \"needed. \\n\\nE.g. `python sparsity_benchmark ~/bff_data/final_table`\"\n\n # Prepares data for the benchmark, may take a while\n data_parameters = DATA_PARAMETERS.copy()\n data_parameters[\"input_file\"] = sys.argv[1]\n data_parameters[\"preprocessed_file\"] = os.path.join(\n os.path.dirname(data_parameters[\"input_file\"]),\n \"preprocessed_dataset.pkl\"\n )\n data_preprocessor = preprocess_dataset(data_parameters=data_parameters)\n\n # Note: the features here should be in range [0, ~1.2], according to the original experiments.\n # 0 corresponds to no data, everything else is linearly scaled from dB units.\n features, _ = data_preprocessor.load_dataset()\n\n logging.info(\"Starting benchmarks\")\n noisy_features = benchmark_noise(\n features=features,\n data_parameters=data_parameters,\n experiment_parameters=EXPERIMENT_PARAMETERS\n )\n benchmark_binarization(\n noisy_features=noisy_features,\n data_parameters=data_parameters,\n experiment_parameters=EXPERIMENT_PARAMETERS\n )\n logging.info(\"Done\")",
"def _run():\n subprocess.check_call(\n [\n \"tools/bazel\",\n \"build\",\n \"-c\",\n \"opt\",\n \"test/core/memory_usage/memory_usage_test\",\n ]\n )\n ret = {}\n for name, benchmark_args in _BENCHMARKS.items():\n for scenario, extra_args in _SCENARIOS.items():\n # TODO(chenancy) Remove when minstack is implemented for channel\n if name == \"channel\" and scenario == \"minstack\":\n continue\n try:\n output = subprocess.check_output(\n [\n \"bazel-bin/test/core/memory_usage/memory_usage_test\",\n ]\n + benchmark_args\n + extra_args\n )\n except subprocess.CalledProcessError as e:\n print(\"Error running benchmark:\", e)\n continue\n for line in output.splitlines():\n for key, (pattern, conversion) in _INTERESTING.items():\n m = re.match(pattern, line)\n if m:\n ret[scenario + \": \" + key] = conversion(m.group(1))\n return ret",
"def _run_benchmark(shell, shell_args, name, app, duration_seconds, measurements,\n verbose, android, save_traces):\n timeout = duration_seconds + _EXTRA_TIMEOUT\n benchmark_args = []\n benchmark_args.append('--app=' + app)\n benchmark_args.append('--duration=' + str(duration_seconds))\n\n output_file = None\n device_output_file = None\n if save_traces:\n output_file = 'benchmark-%s-%s.trace' % (name.replace(' ', '_'),\n time.strftime('%Y%m%d%H%M%S'))\n if android:\n device_output_file = os.path.join(shell.get_tmp_dir_path(), output_file)\n benchmark_args.append('--trace-output=' + device_output_file)\n else:\n benchmark_args.append('--trace-output=' + output_file)\n\n for measurement in measurements:\n benchmark_args.append(measurement)\n\n shell_args = list(shell_args)\n shell_args.append(_BENCHMARK_APP)\n shell_args.append('--force-offline-by-default')\n shell_args.append('--args-for=%s %s' % (_BENCHMARK_APP,\n ' '.join(benchmark_args)))\n\n if verbose:\n print 'shell arguments: ' + str(shell_args)\n return_code, output, did_time_out = shell.run_and_get_output(\n shell_args, timeout=timeout)\n\n if did_time_out:\n return False, 'timed out', output\n if return_code:\n return False, 'return code: ' + str(return_code), output\n\n # Pull the trace file even if some measurements are missing, as it can be\n # useful in debugging.\n if device_output_file:\n shell.pull_file(device_output_file, output_file, remove_original=True)\n\n return True, None, output",
"def RunBenchmark(path_to_apk, run_label):\n # `path_to_apk` is similar to `./out/59.0.3071.132_arm_MonochromeStable.apk`\n chrome_version = ChromeVersion(path_to_apk.split('/')[-1].split('_')[0])\n subprocess.call(['adb', 'install', '-r', '-d', path_to_apk])\n subprocess.call([os.path.join(utils.CHROMIUM_SRC, 'tools',\n 'perf', 'run_benchmark'),\n '--browser=android-system-chrome',\n '--pageset-repeat=1', # could remove this later\n '--results-label=%s' % str(chrome_version),\n # TODO(wangge):not sure if we should run in compatibility\n # mode even for the later version, probably add a check in\n # caller to determine if we should run it in compatibility\n # mode and add an argument `run_in_compatibility_mode` to\n # the `RunBenchmark` function\n '--compatibility-mode=no-field-trials',\n '--compatibility-mode=ignore-certificate-errors',\n '--compatibility-mode=legacy-command-line-path',\n '--compatibility-mode=gpu-benchmarking-fallbacks',\n '--story-filter=wikipedia', # could remove this\n # thinking of adding an argument to the tool to set this\n '--output-dir=%s' % os.path.join(\n utils.APP_ROOT, 'results', run_label,\n str(chrome_version.milestone)),\n # thinking of adding an argument to the tool to set this too\n 'system_health.memory_mobile'])",
"def run(self):\n self.speed_test.start()",
"def main(benchmark, size=None, backend=None, repetitions=None, burnin=1, device=\"cpu\"):\n try:\n bm_module, bm_identifier = get_benchmark_module(benchmark)\n except ImportError as e:\n click.echo(f\"Error while loading benchmark {benchmark}: {e!s}\", err=True)\n raise click.Abort()\n\n available_backends = set(bm_module.__implementations__)\n\n if len(backend) == 0:\n backend = available_backends.copy()\n else:\n backend = set(backend)\n\n unsupported_backends = [b for b in backend if b not in available_backends]\n\n for b in unsupported_backends:\n click.echo(\n f'Backend \"{b}\" is not supported by chosen benchmark (skipping)', err=True\n )\n backend.remove(b)\n\n for b in backend.copy():\n try:\n with setup_functions[b](device=device) as bmod:\n click.echo(f\"Using {b} version {bmod.__version__}\")\n except BackendNotSupported as e:\n click.echo(\n f'Setup for backend \"{b}\" failed (skipping), reason: {e!s}', err=True\n )\n backend.remove(b)\n\n try:\n check_backend_conflicts(backend, device)\n except BackendConflict as exc:\n click.echo(f\"Backend conflict: {exc!s}\", err=True)\n raise click.Abort()\n\n runs = sorted(itertools.product(backend, size))\n\n if len(runs) == 0:\n click.echo(\"Nothing to do\")\n return\n\n timings = {run: [] for run in runs}\n\n if repetitions is None:\n click.echo(\"Estimating repetitions...\")\n repetitions = {}\n\n for b, s in runs:\n # use end-to-end runtime for repetition estimation\n def run_func():\n run = bm_module.get_callable(b, s, device=device)\n with setup_functions[b](device=device):\n run()\n\n repetitions[(b, s)] = estimate_repetitions(run_func)\n else:\n repetitions = {(b, s): repetitions for b, s in runs}\n\n all_runs = list(\n itertools.chain.from_iterable(\n [run] * (repetitions[run] + burnin) for run in runs\n )\n )\n random.shuffle(all_runs)\n\n results = {}\n checked = {r: False for r in runs}\n\n pbar = click.progressbar(\n label=f\"Running {len(all_runs)} benchmarks...\", length=len(runs)\n )\n\n try:\n with pbar:\n for (b, size) in all_runs:\n with setup_functions[b](device=device):\n run = bm_module.get_callable(b, size, device=device)\n with Timer() as t:\n res = run()\n\n # YOWO (you only warn once)\n if not checked[(b, size)]:\n if size in results:\n is_consistent = check_consistency(\n results[size], convert_to_numpy(res, b, device)\n )\n if not is_consistent:\n click.echo(\n f\"\\nWarning: inconsistent results for size {size}\",\n err=True,\n )\n else:\n results[size] = convert_to_numpy(res, b, device)\n checked[(b, size)] = True\n\n timings[(b, size)].append(t.elapsed)\n pbar.update(1.0 / (repetitions[(b, size)] + burnin))\n\n # push pbar to 100%\n pbar.update(1.0)\n\n for run in runs:\n assert len(timings[run]) == repetitions[run] + burnin\n\n finally:\n stats = compute_statistics(timings)\n click.echo(format_output(stats, bm_identifier, device=device))",
"def run_benchmark(env: Env, in_file):\n\n print('Running benchmarks in', in_file.name)\n # Run file_path through mlir_to_bef and bef_executor and extract the\n # benchmark result.\n return env.run_mlir(in_file.read())",
"def Stop():\n\n if global_options.loglevel >= 1 and global_benchmark:\n t = time.time() - global_starting_time\n global_options.stdlog.write(\n \"######### Time spent in benchmarked functions #########\\n\")\n global_options.stdlog.write(\"# function\\tseconds\\tpercent\\n\")\n for key, value in global_benchmark.items():\n global_options.stdlog.write(\n \"# %s\\t%6i\\t%5.2f%%\\n\" % (key, value,\n (100.0 * float(value) / t)))\n global_options.stdlog.write(\n \"#######################################################\\n\")\n\n if global_options.loglevel >= 1:\n global_options.stdlog.write(getFooter() + \"\\n\")\n\n # close files\n if global_options.stdout != sys.stdout:\n global_options.stdout.close()\n # do not close log, otherwise error occurs in atext.py\n # if global_options.stdlog != sys.stdout:\n # global_options.stdlog.close()\n\n if global_options.stderr != sys.stderr:\n global_options.stderr.close()\n\n if global_options.timeit_file:\n\n outfile = open(global_options.timeit_file, \"a\")\n\n if global_options.timeit_header:\n outfile.write(\"\\t\".join(\n (\"name\", \"wall\", \"user\", \"sys\", \"cuser\", \"csys\",\n \"host\", \"system\", \"release\", \"machine\",\n \"start\", \"end\", \"path\", \"cmd\")) + \"\\n\")\n\n csystem, host, release, version, machine = map(str, os.uname())\n uusr, usys, c_usr, c_sys = map(lambda x: \"%5.2f\" % x, os.times()[:4])\n t_end = time.time()\n c_wall = \"%5.2f\" % (t_end - global_starting_time)\n\n if sys.argv[0] == \"run.py\":\n cmd = global_args[0]\n if len(global_args) > 1:\n cmd += \" '\" + \"' '\".join(global_args[1:]) + \"'\"\n else:\n cmd = sys.argv[0]\n\n result = \"\\t\".join((global_options.timeit_name,\n c_wall, uusr, usys, c_usr, c_sys,\n host, csystem, release, machine,\n time.asctime(time.localtime(global_starting_time)),\n time.asctime(time.localtime(t_end)),\n os.path.abspath(os.getcwd()),\n cmd)) + \"\\n\"\n\n outfile.write(result)\n outfile.close()",
"def benchmark_profile(self):\n cb_bin = os.path.join(bin_path, 'compilebench')\n desc = \"benchmark\"\n test_name = \"compilebench_{0}\".format(to_safe_name(desc))\n test = TestProfile(\n name=test_name,\n desc=desc,\n test_path=self.test_path,\n bin_path=bin_path,\n command=\"{0} -D {1} -i 10 --makej\".format(cb_bin, self.test_path))\n\n return test",
"def measure(self):\n # --- perform repeated runs\n for i_run in range(self.n_runs):\n if self.verbosity > 0:\n print(\"Run {0} / {1} ...\".format(i_run, self.n_runs), end = '')\n tdelta = self._timed_execute()\n self._run_times[i_run] = tdelta\n\t\t\t\n if self.verbosity == 2:\n print(tdelta)\n \n # calculate mean\n self._tmean = np.mean(self._run_times)\n # calculate standard deviation\n self._tstdev = np.std(self._run_times)\n # allow access to results\n self.__hasrun = True",
"def main():\n logging.info(\"Testing iOS application performance metrics: application size, launch duration and RAM memory usage!\")\n\n try:\n args = parse_args()\n\n TEST_RESULTS = run_tests(args)\n test_summary = create_test_summary(args, TEST_RESULTS)\n write_results_to_file(TEST_RESULTS, RESULTS_FILE, test_summary, SUMMARY_FILE)\n report_tests(args, test_summary)\n\n except Exception as e:\n logging.error(\"Testing performance of application failed with error '{ERROR}'\".format(ERROR=e))",
"def main():\n known_args, unknown_args = parse_known_args()\n if not unknown_args:\n # return an error message if no command is provided\n sys.exit(\"Please provide a command to benchmark: $ humann_benchmark COMMAND\")\n try:\n process = subprocess.Popen(\" \".join(unknown_args),shell=True)\n except (EnvironmentError, subprocess.CalledProcessError):\n sys.exit(\"Unable to execute command: \" + \" \".join(unknown_args))\n pid=str(process.pid)\n start=time.time()\n max_memory=0\n while process.poll() is None:\n time.sleep(1)\n # while the process is running check on the memory use\n # get the pids of the main process and all children (and their children)\n pids=get_pids(pid)\n stdout=subprocess.check_output([\"ps\",\"--pid\",\",\".join(pids),\"-o\",\"pid,rss,command\"]).decode(\"utf-8\")\n print(\"\\n\"+stdout+\"\\n\")\n # remove the header from the process output\n status=[i.split() for i in filter(lambda x: x, stdout.split(\"\\n\")[1:])]\n # memory is the sum of all rss\n memory=sum(int(i[1]) for i in status)\n if memory > max_memory:\n max_memory=memory\n \n end=time.time()\n print(\"Time: {:.0f} minutes\".format((end-start)/60))\n print(\"Max Memory (RSS): {:.1f} GB\".format(max_memory*1.0/1024**2))",
"def run(self, train=True):\n\n # generate synthetic measurements\n start = time()\n self.data = self.measure()\n\n # build graphs\n self.graphs = self.build_graphs()\n\n # train annotation object\n if train and self.train_globally:\n self.annotator = self.train(*list(self.graphs.values()),\n attribute=self.attribute,\n **self.training_kw)\n\n elif not self.train_globally:\n self.annotator = None\n\n # evaluate benchmarks\n self.results = self.evaluate_benchmarks()\n self.runtime = time() - start",
"def execute(args, suite, benchmark, num_iters):\n\n p = Popen(args, stderr=PIPE, stdout=PIPE)\n stdout, stderr = p.communicate()\n stdout, stderr = stdout.decode(), stderr.decode()\n\n return DoneExec(suite, benchmark, args, num_iters, p.returncode,\n stdout, stderr)",
"def run(self):\n self.run_measurement()\n self.run_analysis()\n if self.get_param_value('update'):\n self.run_update()",
"def run(self):\n logging.info('running experiment...')\n self._prepare()\n self._load_data()\n self._run()\n self._evaluate()\n self._summarise()\n return True",
"def Run(benchmark_spec):\n _UpdateBenchmarkSpecWithFlags(benchmark_spec)\n vm = benchmark_spec.vms[0]\n if benchmark_spec.tpus:\n # For MLPerf 1.0, the benchmake code of different hardware are different.\n if (benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-32' or\n benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-128' or\n benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-256' or\n benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-512' or\n benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-1024' or\n benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-2048'):\n run_path = (\n '$HOME/training_results_{version}/Google/benchmarks/{model}/tpu-{tpus}'\n .format(\n version=VERSION.value,\n model=benchmark_spec.benchmark,\n tpus=benchmark_spec.tpu_groups['train'].GetAcceleratorType()))\n code_path = (\n '$HOME/training_results_{version}/Google/benchmarks/{model}/implementations/tpu-{tpus}-{model}'\n .format(\n version=VERSION.value,\n model=benchmark_spec.benchmark,\n tpus=benchmark_spec.tpu_groups['train'].GetAcceleratorType()))\n\n if MASK in benchmark_spec.benchmark:\n model = 'mask_rcnn'\n elif GNMT in benchmark_spec.benchmark:\n model = 'nmt'\n else:\n model = benchmark_spec.benchmark\n\n mlperf_benchmark_cmd = (\n 'cd {code_path} && '\n 'export PYTHONPATH=$(pwd):$(pwd)/{model} && '\n 'cd {model} && '\n '{run_path}/run_and_time.sh'.format(\n code_path=code_path,\n model=model,\n run_path=run_path))\n\n if SSD in benchmark_spec.benchmark:\n mlperf_benchmark_cmd = (\n 'export '\n 'MLP_GCS_RESNET_CHECKPOINT={checkpoint}'\n ' && {cmd}'.format(\n checkpoint=FLAGS.mlperf_gcs_resnet_checkpoint,\n cmd=mlperf_benchmark_cmd))\n else:\n raise ValueError(\n 'MLPerf configurations do not support the hardware in PKB. 
PKB may '\n 'need to be updated if this is a new TPU type.')\n\n else:\n run_sub_paths = {RESNET: 'resnet/implementations/mxnet',\n TRANSFORMER: 'transformer/implementations/pytorch',\n MINIGO: 'minigo/implementations/tensorflow',\n MASK: 'maskrcnn/implementations/pytorch',\n GNMT: 'gnmt/implementations/pytorch',\n SSD: 'ssd/implementations/pytorch',\n BERT: 'bert/implementations/pytorch',}\n benchmark_path = f'$HOME/training_results_{VERSION.value}/NVIDIA/benchmarks'\n run_path = posixpath.join(benchmark_path,\n run_sub_paths[benchmark_spec.benchmark])\n env = {\n 'DGXSYSTEM': DGXSYSTEM,\n 'NEXP': 1,\n 'PULL': 0,\n 'LOGDIR': f'/tmp/{benchmark_spec.benchmark}',\n }\n envs = {\n RESNET: {},\n TRANSFORMER: {'DATADIR': '/data/wmt/utf8'},\n MINIGO: {'CONT': 'mlperf-nvidia:minigo'},\n MASK: {},\n GNMT: {'DATADIR': '/data/gnmt'},\n SSD: {'DATADIR': '/data'},\n BERT: {}\n }\n env.update(envs[benchmark_spec.benchmark])\n\n run_script = posixpath.join(run_path, 'run_with_docker.sh')\n vm_util.ReplaceText(vm, 'SYSLOGGING=1', 'SYSLOGGING=0', run_script)\n vm_util.ReplaceText(vm, 'docker exec -it', 'docker exec -t', run_script)\n if benchmark_spec.benchmark == RESNET:\n vm_util.ReplaceText(vm, r'mpirun.*run_and_time\\.sh',\n r'.\\/run_and_time.sh', run_script)\n\n env = ' '.join(f'{key}={value}' for key, value in env.items())\n if nvidia_driver.CheckNvidiaGpuExists(vm):\n env = f'{tensorflow.GetEnvironmentVars(vm)} {env}'\n\n mlperf_benchmark_cmd = (\n f'chmod 755 {run_script} && '\n f'cd {run_path} && '\n f'{env} {run_script}')\n\n samples = []\n metadata = _CreateMetadataDict(benchmark_spec)\n stdout, _ = vm.RobustRemoteCommand(mlperf_benchmark_cmd)\n if NONE in FLAGS.mlperf_profiler:\n samples.extend(\n MakeSamplesFromOutput(\n metadata,\n stdout,\n use_tpu=bool(benchmark_spec.tpus),\n model=benchmark_spec.benchmark))\n return samples"
] |
[
"0.7920896",
"0.68559283",
"0.67580193",
"0.6716297",
"0.66414857",
"0.65965855",
"0.6546384",
"0.65423447",
"0.65336007",
"0.6459301",
"0.6339556",
"0.6312379",
"0.62926507",
"0.6286289",
"0.623686",
"0.622832",
"0.61696357",
"0.61670846",
"0.6156982",
"0.6118324",
"0.6117855",
"0.61050993",
"0.61001164",
"0.6088979",
"0.60484916",
"0.6017004",
"0.6016694",
"0.6007208",
"0.59964246",
"0.5988883"
] |
0.7640157
|
1
|
Make an ArgumentParser instance for benchmark options.
|
def make_argument_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--device',
type=str,
choices=['CPU', 'GPU'],
help='Execution device.',
required=True)
parser.add_argument('-N',
type=int,
default=DEFAULT_N,
help='Number of particles.')
parser.add_argument('--rho',
type=float,
default=DEFAULT_RHO,
help='Number density.')
parser.add_argument('--dimensions',
type=int,
choices=[2, 3],
help='Number of dimensions.',
default=DEFAULT_DIMENSIONS)
parser.add_argument('--warmup_steps',
type=int,
default=DEFAULT_WARMUP_STEPS,
help='Number of timesteps to run before timing.')
parser.add_argument('--benchmark_steps',
type=int,
default=DEFAULT_BENCHMARK_STEPS,
help='Number of timesteps to run in the benchmark.')
parser.add_argument('--repeat',
type=int,
default=DEFAULT_REPEAT,
help='Number of times to repeat the run.')
parser.add_argument('-v',
'--verbose',
action='store_true',
help='Verbose output.')
return parser
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def make_argument_parser():\n parser = Benchmark.make_argument_parser()\n parser.add_argument('--skip-reference',\n action='store_true',\n help='Skip the reference simulation run.')\n return parser",
"def arg_parser(cls):\n parser = argparse.ArgumentParser(\n description='{} options'.format(cls.__name__),\n usage=('dotest.py --results-formatter-options='\n '\"--option1 value1 [--option2 value2 [...]]\"'))\n parser.add_argument(\n \"--dump-results\",\n action=\"store_true\",\n help=('dump the raw results data after printing '\n 'the summary output.'))\n return parser",
"def make_argument_parser():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"data_directory\",\r\n help=\"Directory where the data files live.\")\r\n parser.add_argument(\"out\", help=\"Output directory of files.\")\r\n parser.add_argument(\"-t\", \"--test\", action=\"store_true\",\r\n help=(\"Test mode, avoids slow classifiers and uses\"\r\n \" 3 folds\"))\r\n parser.add_argument(\"--folds\", default=10,\r\n help=\"Number of folds for n-fold cross validation\")\r\n parser.add_argument(\"--data_pattern\", default=\"*.mat\",\r\n help=\"Pattern for data files\")\r\n parser.add_argument(\"--label_pattern\", default=\"*.mat\",\r\n help=\"Pattern for label files\")\r\n return parser",
"def build_arg_parser():\n\n main = ArgumentParser(description='AMFinder command-line arguments.',\n allow_abbrev=False,\n formatter_class=RawTextHelpFormatter)\n\n subparsers = main.add_subparsers(dest='run_mode', required=True,\n help='action to be performed.')\n\n _ = training_subparser(subparsers)\n _ = prediction_subparser(subparsers)\n _ = diagnostic_subparser(subparsers)\n\n return main",
"def set_options():\n parser = argparse.ArgumentParser(description='test hexrd.quadrature')\n\n return parser",
"def cmd_line_parser():\n usage = \"usage: %prog [options]\\n\"\n opt_parser = OptionParser(usage=usage)\n opt_parser.add_option(\"--ai\", action=\"store\", dest=\"alternative_input\",\n help=\"an alternative input file (works only with load_from_pickle)\")\n opt_parser.add_option(\"--dl\", action=\"store\", dest=\"dumped_lexicon\",\n help=\"a dumped lexicon file (works only with load_from_pickle\")\n opt_parser.add_option(\"--dotest\", action=\"store_true\", dest=\"dotest\", default=False,\n help=\"use this flag if you want to apply testing\")\n opt_parser.add_option(\"-t\", action=\"store\", dest=\"test_parses\",\n help=\"the output file for the test parses\")\n opt_parser.add_option(\"-n\", action=\"store\", dest=\"train_parses\",\n help=\"the output file for the train parses\")\n opt_parser.add_option(\"-i\", dest=\"inp_file\", default=\"trainFiles/trainPairs\",\n help=\"the input file names (with the annotated corpus)\")\n opt_parser.add_option(\"--devel\", dest=\"development_mode\", default=False, action=\"store_true\",\n help=\"development mode\")\n\n return opt_parser",
"def create_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('url', help='url to scrape')\n return parser",
"def MakeOpts():\n parser = ArgumentParser()\n\n parser.add_argument(\"-o\", \"--host\", dest=\"host\", default=\"hldbv02\",\n help=\"The hostname for the MySQL database\")\n parser.add_argument('-d', '--debug', action='store_true', default=False,\n help='debug mode, store results in dummy DB')\n \n xml_group = parser.add_mutually_exclusive_group(required=True)\n xml_group.add_argument(\"-x\", \"--xml_filename\", default=None,\n help=\"The filename for a single XML result file\")\n xml_group.add_argument(\"-a\", \"--xml_dir\", default=None,\n help=\"The directory from which to import the latest XML results file\")\n \n parser.add_argument(\"-p\", \"--plate\", default=None, type=int, required=True,\n help=\"The plate number (usually between 1-10) in the robot script\")\n parser.add_argument('exp_id_csv', nargs=1,\n help='the name of the CVS file where the exp_ids are')\n\n return parser",
"def make_parser():\n p = argparse.ArgumentParser(\n description=\"Visualize and analyze error from oblique/straight tag observations\"\n )\n\n p.add_argument(\"-n\", help=\"name of the test in the config file\")\n\n p.add_argument(\"-t\", help=\"throw out bad tags\", action=\"store_true\")\n\n p.add_argument(\"-v\", help=\"visualize data\", action=\"store_true\")\n\n p.add_argument(\"-i\", help=\"print result data\", action=\"store_true\")\n\n return p",
"def _create_parser(self):\n default_options = self._create_defaults()\n\n all_categories = ['build', 'whitespace']\n\n mock_stderr = self._MockStdErr()\n\n return ArgumentParser(\n all_categories=all_categories,\n base_filter_rules=[],\n default_options=default_options,\n mock_stderr=mock_stderr,\n usage='test usage')",
"def get_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset', type=str)\n parser.add_argument('--method', type=str)\n parser.add_argument('--size_part', type=float, default=None)\n parser.add_argument('--start', type=int, default=0)\n parser.add_argument('--count', type=int, default=None)\n return parser",
"def setup_options_parser(self, argparser):\n pass",
"def command_line_argument_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(\n description=description(),\n epilog=epilog(),\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n return parser",
"def command_line_argument_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(\n description=description(),\n epilog=epilog(),\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n return parser",
"def build_argument_parser():\n description=\"A simple tool to batch rename given files.\"\n parser = ArgumentParser(description=description)\n parser.add_argument(\"-i\", \"--input-list\", required=False,\n help=\"the path to the input list file.\")\n parser.add_argument(\"-p\", \"--glob-pattern\", default=DEFAULT_GLOB_PATTERN,\n help=\"a glob pattern to filter input files.\")\n return parser",
"def create_basic_parse():\n # SEE: https://docs.python.org/3/library/argparse.html\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--path_cover', type=str, required=True,\n help='path to the csv cover file')\n parser.add_argument('-d', '--path_dataset', type=str, required=False,\n help='path to the dataset location, '\n 'if missing in cover', default=None)\n parser.add_argument('-o', '--path_out', type=str, required=True,\n help='path to the output directory')\n parser.add_argument('--unique', dest='unique', action='store_true',\n help='whether each experiment have unique time stamp')\n parser.add_argument('--visual', dest='visual', action='store_true',\n help='whether visualise partial results')\n parser.add_argument('--lock_expt', dest='lock_thread', action='store_true',\n help='whether lock to run experiment in single thread')\n parser.add_argument('--run_comp_benchmark', action='store_true',\n help='run computation benchmark on the end')\n parser.add_argument('--nb_workers', type=int, required=False, default=1,\n help='number of registration running in parallel')\n return parser",
"def parser(cls, *, with_showtb=False):\n parser = argparse.ArgumentParser(add_help=False)\n parser.add_argument('-v', '--verbose', action='count', default=0,\n help='produce more output')\n parser.add_argument('-q', '--quiet', action='count', default=0,\n help='produce less output')\n parser.add_argument('--dry-run', dest='dryrun', action='store_true',\n default=False, help='do not actually make changes')\n\n if with_showtb:\n parser.add_argument('--traceback', action='store_true',\n default=False, help='do not hide tracebacks')\n\n return parser",
"def make_cli_parser(self):\n super(ContextualArgParser, self).make_cli_parser()\n self.cli_parser.add_option('--num-permutations', type='int',\n default=cbpn.NUM_PERMUTATIONS,\n help=(\"number of permutations for statistics \"\n \"[default: %default]\")\n )\n self.cli_parser.add_option('-s', '--edge-swaps', type='int',\n help=(\"Perform the given number of edge swaps to \"\n \"produce random graphs. [NOTE: using this option \"\n \"changes the algorithm for determining \"\n \"significance of a link between each given pair \"\n \"of terms.]\"\n )\n )\n self.cli_parser.add_option('--no-estimation', dest='estimate',\n action='store_false', default=True,\n help=(\"Do not use p-value estimation, but run the \"\n \"full number of permutations for every pair of \"\n \"annotation terms. [NOTE: this can substantially \"\n \"increase running time.]\"\n )\n )\n self.cli_parser.add_option('--score-correction',\n action='store_true', default=False,\n help=(\"Correct scores for each pair of terms by an \"\n \"\\\"expected\\\" value calculated from the mean \"\n \"expression value.\"\n )\n )",
"def get_parser():\n\n parser = argparse.ArgumentParser(description=textwrap.dedent(\"\"\"\n Downloads and tests the md5 and file size of a given version of Anaconda located in\n http://repo.continuum.io/archive/\n\n The version option (-v) allows you to select a specific version of Anaconda to download and test.\n This will include every system's Anaconda distribution for that version (OSX, Windows, Linux)\n\n The --log option will write the results of these tests to a log file. If not enabled, results\n will be written to stdout.\n\n If you already have Anaconda installers inside the pkgs directory and wish to test those without\n downloading new ones, use the --no-download option. NOTE: You will still need to provide the\n version (-v) of the installers.\n \"\"\"), formatter_class=argparse.RawTextHelpFormatter)\n\n parser.add_argument('--log', action='store_true', dest='log', default=False,\n help=\"save a log of any errors discovered\")\n parser.add_argument('-v', '--version', action='store', default=False,\n help=\"version of Anaconda to download and test\")\n parser.add_argument('--no-download', action='store_true', dest='nodl', default=False,\n help=\"test local anaconda packages in pkgs, rather than download new ones\")\n\n return parser",
"def argument_parser():\n parser = argparse.ArgumentParser(\n description='description',\n formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument('-n','--numcolors', type=int, help=\"Number of colors\", required=True)\n return parser",
"def arg_parse():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-m\", \"--mix\", required=False, help=\"cube shuffle\")\n parser.add_argument(\"-e\", \"--explain\", action=\"store_true\", help=\"Get more explanation about steps\")\n options = parser.parse_args()\n return options",
"def get_args():\n parser = argparse.ArgumentParser(description=\"Arguments for data exploration\")\n parser.add_argument(\"--tokenize\",\n dest=\"tokenize\",\n action=\"store_true\",\n help=\"Tokenize by words and sentences, counting averages/sd for each.\")\n return parser",
"def initCmdLineParser():\n\n # Init parser and all general flags\n logging.debug(\"initiating command line option parser\")\n usage = \"usage: %prog [options]\"\n parser = OptionParser(usage)\n parser.add_option(\"--gen-answer-file\", help=\"Generate a template of an answer file, using this option excludes all other option\")\n parser.add_option(\"--answer-file\", help=\"Runs the configuration in none-interactive mode, extracting all information from the \\\n configuration file. using this option excludes all other option\")\n parser.add_option(\"--no-mem-check\", help=\"Disable minimum memory check\", action=\"store_true\", default=False)\n\n # For each group, create a group option\n for group in controller.getAllGroups():\n groupParser = OptionGroup(parser, group.getKey(\"DESCRIPTION\"))\n\n for param in group.getAllParams():\n cmdOption = param.getKey(\"CMD_OPTION\")\n paramUsage = param.getKey(\"USAGE\")\n optionsList = param.getKey(\"OPTION_LIST\")\n useDefault = param.getKey(\"USE_DEFAULT\")\n if not useDefault:\n if optionsList:\n groupParser.add_option(\"--%s\" % cmdOption, metavar=optionsList, help=paramUsage, choices=optionsList)\n else:\n groupParser.add_option(\"--%s\" % cmdOption, help=paramUsage)\n\n # Add group parser to main parser\n parser.add_option_group(groupParser)\n\n return parser",
"def build_parser(usage, **kwargs):\n return BetterArgumentParser(usage=usage, version=VERSION, **kwargs)",
"def create_arg_parser():\n server_modes = ['builtin', 'waitress']\n\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('h', metavar='HOST', help='Server HOST (e.g. \"localhost\")', type=str)\n parser.add_argument('p', metavar='PORT', help='Server PORT (e.g. \"5001\")', type=int)\n parser.add_argument('m', metavar='SERVER_MODE', help=\", \".join(server_modes), choices=server_modes, type=str)\n parser.add_argument('--debug', help=\"Run builtin server in debug mode\", action='store_true', default=False)\n\n return parser",
"def argParser():\n parser = ArgumentParser(description=('Downloads problems from Project Euler'\n ' and saves copies locally.'))\n parser.add_argument('-s', '--start', type=int, default=1,\n help='The problem number to start the downloads at, default 1.')\n parser.add_argument('-e', '--end', type=int, default=None,\n help='The problem number to end the downloads at, default None.')\n return parser",
"def _createOptionParser():\n usage = \\\n\"\"\"%prog [options] outputFile\n\nMines a large number of concise wine reviews from an online web site, and dumps\nthem to the given filename.\"\"\"\n\n parser = optparse.OptionParser(usage)\n\n parser.add_option('--debug', action='store_true', dest='debug',\n default=False, help='Enables debugging mode [False]')\n\n return parser",
"def create_parser():\n parser = OptionParser()\n\n parser.add_option(\"-s\", \"--script\", dest=\"script\", default='pbs.sh', help=\"Output location\")\n parser.add_option(\"-p\", \"--period\", dest=\"period\", default=\"30\", help=\"qstat period\")\n\n parser.set_usage(\"\"\"%prog [options]\"\"\")\n return parser",
"def create_options():\n optparser = optparse.OptionParser()\n optparser.add_option(\"-f\", \"--filename\", type=\"string\",\n help=\"execute a single unit test file\")\n optparser.add_option(\"-s\", \"--subprocess\", action=\"store_true\",\n default=False,\n help=\"run everything in an own subprocess \"\n \"(default: use a single process)\")\n optparser.add_option(\"-t\", \"--timeout\", type=\"int\", default=70,\n help=\"Timout for subprocesses before being killed \"\n \"(default: 70s per file)\")\n optparser.add_option(\"-v\", \"--verbose\", action=\"store_true\", default=False,\n help=\"be verbose and print anything instantly\")\n optparser.add_option(\"-r\", \"--random\", action=\"store_true\", default=False,\n help=\"randomize the order of tests\")\n optparser.add_option(\"-S\", \"--seed\", type=\"int\",\n help=\"seed the randomizer(useful to \"\n \"recreate earlier randomized test cases)\")\n optparser.add_option(\"-i\", \"--interactive\", action=\"callback\",\n callback=include_tag,\n callback_args=(\"interactive\",),\n help=\"also execute interactive tests\")\n optparser.add_option(\"-e\", \"--exclude\", action=\"callback\",\n callback=exclude_tag, type=\"string\",\n help=\"exclude test containing the tag\")\n optparser.add_option(\"-l\", \"--listtags\", action=\"callback\",\n callback=list_tags,\n help=\"lists all available tags and exits\")\n optparser.add_option(\"--logfile\", type=\"string\",\n help=\"save output to log file\")\n optkeys = [\"filename\",\n \"subprocess\",\n \"timeout\",\n \"random\",\n \"seed\",\n \"verbose\"\n ]\n return optparser, optkeys",
"def getArgumentParser():\n parser = argparse.ArgumentParser(description=\"Script for running optimization for the ZH dark photon SR\")\n parser.add_argument('-i',\n '--infile',\n dest='infile',\n help='Input CSV file',\n default = '/afs/cern.ch/work/s/ssevova/public/dark-photon-atlas/zhdarkphotonml/samples/v09/mc16d_v09_samples.csv')\n parser.add_argument('-o',\n '--output',\n dest='outdir',\n help='Output directory for plots, selection lists, etc',\n default='outdir')\n parser.add_argument('--plotInputs',action='store_true', help='Plot scaled train & test inputs')\n parser.add_argument('--plotOutputs',action='store_true', help='Plot scaled test outputs for given probability range')\n parser.add_argument('--lower',help='Lower limit for conditional filtering')\n parser.add_argument('--upper',help='Upper limit for conditional filtering')\n\n return parser"
] |
[
"0.7983817",
"0.7168031",
"0.70897955",
"0.70692337",
"0.7047852",
"0.6983982",
"0.693783",
"0.6929585",
"0.6902446",
"0.689302",
"0.6882429",
"0.6879765",
"0.68780637",
"0.68780637",
"0.6868575",
"0.6855423",
"0.68533826",
"0.6851654",
"0.68308103",
"0.6825723",
"0.678807",
"0.67698914",
"0.67691976",
"0.6765229",
"0.67618215",
"0.6760568",
"0.67495626",
"0.6745742",
"0.67241704",
"0.672046"
] |
0.7317018
|
1
|
Make an ArgumentParser instance for comparative benchmark options.
|
def make_argument_parser():
parser = Benchmark.make_argument_parser()
parser.add_argument('--skip-reference',
action='store_true',
help='Skip the reference simulation run.')
return parser
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def make_argument_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('--device',\n type=str,\n choices=['CPU', 'GPU'],\n help='Execution device.',\n required=True)\n parser.add_argument('-N',\n type=int,\n default=DEFAULT_N,\n help='Number of particles.')\n parser.add_argument('--rho',\n type=float,\n default=DEFAULT_RHO,\n help='Number density.')\n parser.add_argument('--dimensions',\n type=int,\n choices=[2, 3],\n help='Number of dimensions.',\n default=DEFAULT_DIMENSIONS)\n parser.add_argument('--warmup_steps',\n type=int,\n default=DEFAULT_WARMUP_STEPS,\n help='Number of timesteps to run before timing.')\n parser.add_argument('--benchmark_steps',\n type=int,\n default=DEFAULT_BENCHMARK_STEPS,\n help='Number of timesteps to run in the benchmark.')\n parser.add_argument('--repeat',\n type=int,\n default=DEFAULT_REPEAT,\n help='Number of times to repeat the run.')\n parser.add_argument('-v',\n '--verbose',\n action='store_true',\n help='Verbose output.')\n return parser",
"def build_arg_parser():\n\n main = ArgumentParser(description='AMFinder command-line arguments.',\n allow_abbrev=False,\n formatter_class=RawTextHelpFormatter)\n\n subparsers = main.add_subparsers(dest='run_mode', required=True,\n help='action to be performed.')\n\n _ = training_subparser(subparsers)\n _ = prediction_subparser(subparsers)\n _ = diagnostic_subparser(subparsers)\n\n return main",
"def make_argument_parser():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"data_directory\",\r\n help=\"Directory where the data files live.\")\r\n parser.add_argument(\"out\", help=\"Output directory of files.\")\r\n parser.add_argument(\"-t\", \"--test\", action=\"store_true\",\r\n help=(\"Test mode, avoids slow classifiers and uses\"\r\n \" 3 folds\"))\r\n parser.add_argument(\"--folds\", default=10,\r\n help=\"Number of folds for n-fold cross validation\")\r\n parser.add_argument(\"--data_pattern\", default=\"*.mat\",\r\n help=\"Pattern for data files\")\r\n parser.add_argument(\"--label_pattern\", default=\"*.mat\",\r\n help=\"Pattern for label files\")\r\n return parser",
"def arg_parser(cls):\n parser = argparse.ArgumentParser(\n description='{} options'.format(cls.__name__),\n usage=('dotest.py --results-formatter-options='\n '\"--option1 value1 [--option2 value2 [...]]\"'))\n parser.add_argument(\n \"--dump-results\",\n action=\"store_true\",\n help=('dump the raw results data after printing '\n 'the summary output.'))\n return parser",
"def make_parser():\n p = argparse.ArgumentParser(\n description=\"Visualize and analyze error from oblique/straight tag observations\"\n )\n\n p.add_argument(\"-n\", help=\"name of the test in the config file\")\n\n p.add_argument(\"-t\", help=\"throw out bad tags\", action=\"store_true\")\n\n p.add_argument(\"-v\", help=\"visualize data\", action=\"store_true\")\n\n p.add_argument(\"-i\", help=\"print result data\", action=\"store_true\")\n\n return p",
"def cmd_line_parser():\n usage = \"usage: %prog [options]\\n\"\n opt_parser = OptionParser(usage=usage)\n opt_parser.add_option(\"--ai\", action=\"store\", dest=\"alternative_input\",\n help=\"an alternative input file (works only with load_from_pickle)\")\n opt_parser.add_option(\"--dl\", action=\"store\", dest=\"dumped_lexicon\",\n help=\"a dumped lexicon file (works only with load_from_pickle\")\n opt_parser.add_option(\"--dotest\", action=\"store_true\", dest=\"dotest\", default=False,\n help=\"use this flag if you want to apply testing\")\n opt_parser.add_option(\"-t\", action=\"store\", dest=\"test_parses\",\n help=\"the output file for the test parses\")\n opt_parser.add_option(\"-n\", action=\"store\", dest=\"train_parses\",\n help=\"the output file for the train parses\")\n opt_parser.add_option(\"-i\", dest=\"inp_file\", default=\"trainFiles/trainPairs\",\n help=\"the input file names (with the annotated corpus)\")\n opt_parser.add_option(\"--devel\", dest=\"development_mode\", default=False, action=\"store_true\",\n help=\"development mode\")\n\n return opt_parser",
"def create_basic_parse():\n # SEE: https://docs.python.org/3/library/argparse.html\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--path_cover', type=str, required=True,\n help='path to the csv cover file')\n parser.add_argument('-d', '--path_dataset', type=str, required=False,\n help='path to the dataset location, '\n 'if missing in cover', default=None)\n parser.add_argument('-o', '--path_out', type=str, required=True,\n help='path to the output directory')\n parser.add_argument('--unique', dest='unique', action='store_true',\n help='whether each experiment have unique time stamp')\n parser.add_argument('--visual', dest='visual', action='store_true',\n help='whether visualise partial results')\n parser.add_argument('--lock_expt', dest='lock_thread', action='store_true',\n help='whether lock to run experiment in single thread')\n parser.add_argument('--run_comp_benchmark', action='store_true',\n help='run computation benchmark on the end')\n parser.add_argument('--nb_workers', type=int, required=False, default=1,\n help='number of registration running in parallel')\n return parser",
"def make_cli_parser(self):\n super(ContextualArgParser, self).make_cli_parser()\n self.cli_parser.add_option('--num-permutations', type='int',\n default=cbpn.NUM_PERMUTATIONS,\n help=(\"number of permutations for statistics \"\n \"[default: %default]\")\n )\n self.cli_parser.add_option('-s', '--edge-swaps', type='int',\n help=(\"Perform the given number of edge swaps to \"\n \"produce random graphs. [NOTE: using this option \"\n \"changes the algorithm for determining \"\n \"significance of a link between each given pair \"\n \"of terms.]\"\n )\n )\n self.cli_parser.add_option('--no-estimation', dest='estimate',\n action='store_false', default=True,\n help=(\"Do not use p-value estimation, but run the \"\n \"full number of permutations for every pair of \"\n \"annotation terms. [NOTE: this can substantially \"\n \"increase running time.]\"\n )\n )\n self.cli_parser.add_option('--score-correction',\n action='store_true', default=False,\n help=(\"Correct scores for each pair of terms by an \"\n \"\\\"expected\\\" value calculated from the mean \"\n \"expression value.\"\n )\n )",
"def build_argument_parser():\n description=\"A simple tool to batch rename given files.\"\n parser = ArgumentParser(description=description)\n parser.add_argument(\"-i\", \"--input-list\", required=False,\n help=\"the path to the input list file.\")\n parser.add_argument(\"-p\", \"--glob-pattern\", default=DEFAULT_GLOB_PATTERN,\n help=\"a glob pattern to filter input files.\")\n return parser",
"def MakeOpts():\n parser = ArgumentParser()\n\n parser.add_argument(\"-o\", \"--host\", dest=\"host\", default=\"hldbv02\",\n help=\"The hostname for the MySQL database\")\n parser.add_argument('-d', '--debug', action='store_true', default=False,\n help='debug mode, store results in dummy DB')\n \n xml_group = parser.add_mutually_exclusive_group(required=True)\n xml_group.add_argument(\"-x\", \"--xml_filename\", default=None,\n help=\"The filename for a single XML result file\")\n xml_group.add_argument(\"-a\", \"--xml_dir\", default=None,\n help=\"The directory from which to import the latest XML results file\")\n \n parser.add_argument(\"-p\", \"--plate\", default=None, type=int, required=True,\n help=\"The plate number (usually between 1-10) in the robot script\")\n parser.add_argument('exp_id_csv', nargs=1,\n help='the name of the CVS file where the exp_ids are')\n\n return parser",
"def get_parser():\n\n parser = argparse.ArgumentParser(description=textwrap.dedent(\"\"\"\n Downloads and tests the md5 and file size of a given version of Anaconda located in\n http://repo.continuum.io/archive/\n\n The version option (-v) allows you to select a specific version of Anaconda to download and test.\n This will include every system's Anaconda distribution for that version (OSX, Windows, Linux)\n\n The --log option will write the results of these tests to a log file. If not enabled, results\n will be written to stdout.\n\n If you already have Anaconda installers inside the pkgs directory and wish to test those without\n downloading new ones, use the --no-download option. NOTE: You will still need to provide the\n version (-v) of the installers.\n \"\"\"), formatter_class=argparse.RawTextHelpFormatter)\n\n parser.add_argument('--log', action='store_true', dest='log', default=False,\n help=\"save a log of any errors discovered\")\n parser.add_argument('-v', '--version', action='store', default=False,\n help=\"version of Anaconda to download and test\")\n parser.add_argument('--no-download', action='store_true', dest='nodl', default=False,\n help=\"test local anaconda packages in pkgs, rather than download new ones\")\n\n return parser",
"def set_options():\n parser = argparse.ArgumentParser(description='test hexrd.quadrature')\n\n return parser",
"def _create_parser(self):\n default_options = self._create_defaults()\n\n all_categories = ['build', 'whitespace']\n\n mock_stderr = self._MockStdErr()\n\n return ArgumentParser(\n all_categories=all_categories,\n base_filter_rules=[],\n default_options=default_options,\n mock_stderr=mock_stderr,\n usage='test usage')",
"def command_line_argument_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(\n description=description(),\n epilog=epilog(),\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n return parser",
"def command_line_argument_parser() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(\n description=description(),\n epilog=epilog(),\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n return parser",
"def create_cli_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('json_file', action='store',\n type=str, help=('Outlier per threshold file. This file '\n 'should have been generated by the '\n 'calculate_outliers_by_threshold '\n 'script.'))\n return parser",
"def mkOptionParser():\n \n usage = \"\"\"%prog <input.bed> <output.bed> <threshold>\n %prog filters out the lines that don't meet a certain threshold. \"\"\"\n\n parser = OptionParser(usage)\n \n\n return parser",
"def create_argument_parser(cls):\n\n parser = super().create_argument_parser()\n\n # GitHub options\n group = parser.add_argument_group('GitHub arguments')\n\n group.add_argument(\"--owner\", required=True,\n help=\"GitHub owner\")\n group.add_argument(\"--repository\", required=True,\n help=\"GitHub repository\")\n group.add_argument(\"--sleep-for-rate\", dest='sleep_for_rate',\n action='store_true',\n help=\"sleep for getting more rate\")\n group.add_argument(\"--min-rate-to-sleep\", dest='min_rate_to_sleep',\n default=MIN_RATE_LIMIT, type=int,\n help=\"sleep until reset when the rate limit reaches this value\")\n\n return parser",
"def argument_parser():\n parser = argparse.ArgumentParser(\n description='description',\n formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument('-n','--numcolors', type=int, help=\"Number of colors\", required=True)\n return parser",
"def create_options():\n optparser = optparse.OptionParser()\n optparser.add_option(\"-f\", \"--filename\", type=\"string\",\n help=\"execute a single unit test file\")\n optparser.add_option(\"-s\", \"--subprocess\", action=\"store_true\",\n default=False,\n help=\"run everything in an own subprocess \"\n \"(default: use a single process)\")\n optparser.add_option(\"-t\", \"--timeout\", type=\"int\", default=70,\n help=\"Timout for subprocesses before being killed \"\n \"(default: 70s per file)\")\n optparser.add_option(\"-v\", \"--verbose\", action=\"store_true\", default=False,\n help=\"be verbose and print anything instantly\")\n optparser.add_option(\"-r\", \"--random\", action=\"store_true\", default=False,\n help=\"randomize the order of tests\")\n optparser.add_option(\"-S\", \"--seed\", type=\"int\",\n help=\"seed the randomizer(useful to \"\n \"recreate earlier randomized test cases)\")\n optparser.add_option(\"-i\", \"--interactive\", action=\"callback\",\n callback=include_tag,\n callback_args=(\"interactive\",),\n help=\"also execute interactive tests\")\n optparser.add_option(\"-e\", \"--exclude\", action=\"callback\",\n callback=exclude_tag, type=\"string\",\n help=\"exclude test containing the tag\")\n optparser.add_option(\"-l\", \"--listtags\", action=\"callback\",\n callback=list_tags,\n help=\"lists all available tags and exits\")\n optparser.add_option(\"--logfile\", type=\"string\",\n help=\"save output to log file\")\n optkeys = [\"filename\",\n \"subprocess\",\n \"timeout\",\n \"random\",\n \"seed\",\n \"verbose\"\n ]\n return optparser, optkeys",
"def arg_parse():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-m\", \"--mix\", required=False, help=\"cube shuffle\")\n parser.add_argument(\"-e\", \"--explain\", action=\"store_true\", help=\"Get more explanation about steps\")\n options = parser.parse_args()\n return options",
"def _build_arg_parser():\n parser = argparse.ArgumentParser(\n description=_description,\n add_help=True,\n )\n add_generic_args(parser)\n add_diff_args(parser)\n add_filename_args(parser, [\"base\", \"remote\"])\n\n parser.add_argument(\n '-o', '--output',\n default=None,\n help=\"if supplied, the diff is written to this file. \"\n \"Otherwise it is printed to the terminal.\")\n\n return parser",
"def create_option_parser():\n from optparse import OptionParser\n usage='Usage: %prog [<options>] <bilingual file> <language tag 1> <language tag 2>'\n parser = OptionParser(usage=usage)\n\n parser.add_option(\n '-u', '--create-tuning',\n dest='tuning',\n help='Specify percentage of corpus to be used for tuning corpus.',\n default=0\n )\n parser.add_option(\n '-e', '--create-evaluation',\n dest='eval',\n help='Specify percentage of corpus to be used for tuning corpus.',\n default=0\n )\n return parser",
"def getArgumentParser():\n parser = argparse.ArgumentParser(description=\"Script for running optimization for the ZH dark photon SR\")\n parser.add_argument('-i',\n '--infile',\n dest='infile',\n help='Input CSV file',\n default = '/afs/cern.ch/work/s/ssevova/public/dark-photon-atlas/zhdarkphotonml/samples/v09/mc16d_v09_samples.csv')\n parser.add_argument('-o',\n '--output',\n dest='outdir',\n help='Output directory for plots, selection lists, etc',\n default='outdir')\n parser.add_argument('--plotInputs',action='store_true', help='Plot scaled train & test inputs')\n parser.add_argument('--plotOutputs',action='store_true', help='Plot scaled test outputs for given probability range')\n parser.add_argument('--lower',help='Lower limit for conditional filtering')\n parser.add_argument('--upper',help='Upper limit for conditional filtering')\n\n return parser",
"def get_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset', type=str)\n parser.add_argument('--method', type=str)\n parser.add_argument('--size_part', type=float, default=None)\n parser.add_argument('--start', type=int, default=0)\n parser.add_argument('--count', type=int, default=None)\n return parser",
"def parse_arguments():\n parser = ArgumentParser(description=\"Run tests in parallel.\")\n parser.add_argument(\n \"-d\", \"--debug\", action=\"store_true\", help=\"Enable debug logging\"\n )\n parser.add_argument(\n \"-l\", \"--layer\", help=\"Greedy match test layer name.\", action=\"append\"\n )\n parser.add_argument(\n \"-m\", \"--module\", help=\"Greedy match module name.\", action=\"append\"\n )\n return parser.parse_args()",
"def build_parser(usage, **kwargs):\n return BetterArgumentParser(usage=usage, version=VERSION, **kwargs)",
"def create_arg_parser():\n server_modes = ['builtin', 'waitress']\n\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('h', metavar='HOST', help='Server HOST (e.g. \"localhost\")', type=str)\n parser.add_argument('p', metavar='PORT', help='Server PORT (e.g. \"5001\")', type=int)\n parser.add_argument('m', metavar='SERVER_MODE', help=\", \".join(server_modes), choices=server_modes, type=str)\n parser.add_argument('--debug', help=\"Run builtin server in debug mode\", action='store_true', default=False)\n\n return parser",
"def _create_argument_parser():\n\n parser = argparse.ArgumentParser(\n description=\"Execute a CPAchecker run in the VerifierCloud using the web interface.\"\n + \" Command-line parameters can additionally be read from a file if file name prefixed with '@' is given as argument.\",\n fromfile_prefix_chars=\"@\",\n add_help=False, # conflicts with -heap\n )\n\n parser.add_argument(\"-h\", \"--help\", action=\"help\", help=\"Prints this help.\")\n\n parser.add_argument(\n \"--cloudMaster\",\n dest=\"cloud_master\",\n default=\"https://vcloud.sosy-lab.org/cpachecker/webclient/\",\n metavar=\"HOST\",\n help=\"Sets the webclient host of the VerifierCloud instance to be used.\",\n )\n\n parser.add_argument(\n \"--cloudPriority\",\n dest=\"cloud_priority\",\n metavar=\"PRIORITY\",\n help=\"Sets the priority for this benchmark used in the VerifierCloud. Possible values are IDLE, LOW, HIGH, URGENT.\",\n )\n\n parser.add_argument(\n \"--cloudCPUModel\",\n dest=\"cpu_model\",\n type=str,\n default=None,\n metavar=\"CPU_MODEL\",\n help=\"Only execute runs in the VerifierCloud on CPU models that contain the given string.\",\n )\n\n parser.add_argument(\n \"--cloudUser\",\n dest=\"cloud_user\",\n metavar=\"USER:PWD\",\n help=\"The user and password for the VerifierCloud.\",\n )\n\n parser.add_argument(\n \"--revision\",\n dest=\"revision\",\n metavar=\"BRANCH:REVISION\",\n help=\"The svn revision of CPAchecker to use.\",\n )\n\n parser.add_argument(\n \"-d\", \"--debug\", action=\"store_true\", help=\"Enable debug output\"\n )\n\n parser.add_argument(\n \"-o\",\n \"--outputpath\",\n dest=\"output_path\",\n type=str,\n default=DEFAULT_OUTPUT_PATH,\n help=\"Output prefix for the generated results. \"\n + \"If the path is a folder files are put into it,\"\n + \"otherwise it is used as a prefix for the resulting files.\",\n )\n parser.add_argument(\n \"--resultFilePattern\",\n dest=\"result_file_pattern\",\n type=str,\n default=\"**\",\n help=\"Only files matching this glob pattern are transported back to the client.\",\n )\n\n parser.add_argument(\n \"-T\",\n \"--timelimit\",\n dest=\"timelimit\",\n default=None,\n type=util.parse_timespan_value,\n help=\"Time limit in seconds\",\n metavar=\"SECONDS\",\n )\n\n parser.add_argument(\n \"-M\",\n \"--memorylimit\",\n dest=\"memorylimit\",\n default=None,\n type=util.parse_memory_value,\n help=\"Memory limit\",\n metavar=\"BYTES\",\n )\n\n parser.add_argument(\n \"-c\",\n \"--corelimit\",\n dest=\"corelimit\",\n type=int,\n default=None,\n metavar=\"N\",\n help=\"Limit the tool to N CPU cores.\",\n )\n\n parser.add_argument(\n \"--version\", action=\"version\", version=\"%(prog)s \" + __version__\n )\n return parser",
"def create_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('url', help='url to scrape')\n return parser"
] |
[
"0.72827846",
"0.7190032",
"0.7093924",
"0.7032623",
"0.70229095",
"0.69860554",
"0.6946884",
"0.694275",
"0.693303",
"0.6930106",
"0.69170386",
"0.68352914",
"0.6832233",
"0.68094486",
"0.68094486",
"0.6783497",
"0.6778075",
"0.6772473",
"0.675373",
"0.6753382",
"0.6745362",
"0.67453533",
"0.67277616",
"0.6726878",
"0.67200977",
"0.67179614",
"0.6714497",
"0.6710915",
"0.67108697",
"0.6709461"
] |
0.788973
|
0
|
Applies default secthresh & exclusion radius constraints
|
def apply_default_constraints(self):
try:
self.apply_secthresh(pipeline_weaksec(self.koi))
except NoWeakSecondaryError:
logging.warning('No secondary eclipse threshold set for {}'.format(self.koi))
self.set_maxrad(default_r_exclusion(self.koi))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def constraints(self):\n ...",
"def _discretize(self, constraints_object):\n pass",
"def objects_radius(self, centre, radius):",
"def apply_constraint(self):\n\t\tself.angle = self.constraint(self.angle) % 360",
"def cutout(self, centre, radius):",
"def constraints(self, x):\n pass",
"def _on_configure(self, event):\n self.radius = (min(event.width, event.height) - 2 * self.circ_pad) / 2",
"def get_receptive_field_radius(self):\n raise NotImplementedError()",
"def set_auto_throats_radius(self):\n\n for n1, n2 in self.graph.edges:\n self.graph[n1][n2]['radius'] = self._compute_auto_throat_radius(\n n1, n2)",
"def __init__(self, radius=1, thickness=1, inner_radius=0):\n\n super().__init__()\n self.radius = radius\n self.inner_radius = inner_radius\n self.thickness = thickness",
"def get_radius(self):",
"def contractor(self, *args, **kwargs):\n vertices = copy.deepcopy(args[0])\n nrange = len(vertices[0])\n xpts = []\n ypts = []\n for i in range(nrange):\n xpts.append(vertices[0][i].value)\n ypts.append(vertices[1][i].value)\n constraint = copy.deepcopy(args[1])\n \n \n \n \n qxdot,qxddot,qydot,qyddot = self.update_allq(xpts,ypts)\n \n ## the all important computation split (need to abstract this kind of thing)\n ##lhs = (np.sqrt(qxdot*qxdot + qydot*qydot)**3.) *constraint\n lhs = ( ( np.sqrt(qxdot**2 + qydot**2) )**3 )*constraint\n \n # check2 = qxdot*qyddot\n # if check2.width() < 1.e-2:\n # check2.min.value = check2.real.value\n # check2.max.value = check2.real.value\n # t1 = (lhs - check2)/qydot\n \n #\n # qyddot\n #\n check2 = qydot*qxddot\n if check2.width() < 1.e-2 and check2.contains(0.):\n check2.inf = 0.\n check2.sup = 0.\n #if qxdot.contains(0.) and abs(qxdot.min.value)>1.e-6:\n # print 'qxdot = ',qxdot\n # print 'qxdot not invertable, implement other logic please'\n if abs(float(qxdot.inf))<1.e-6:\n qxdot.inf = 1.e-10\n print 'invert qxdot'\n print 'qxdot = ', qxdot\n \n #t1 = (lhs + qydot*qxddot)/(qxdot)\n t1 = (lhs + check2)/(qxdot)\n \n t1 = t1 & qyddot # go ahead and shrink t1 to qyddot - they are logically equivalent\n total_ans = []\n useful_indices = []\n bad_indices = []\n for i in range(len(ypts)): \n min_ans = 0.\n for j in range(len(ypts)):\n if j==i:\n pass\n else:\n min_ans = (ypts[j]*float(self.localBasis[2,j])) + min_ans\n min_ans = t1 - min_ans\n if (abs(float(self.localBasis[2,i])) > 0.0):\n min_ans = min_ans/float(self.localBasis[2,i])\n useful_indices.append(i)\n else:\n bad_indices.append(i)\n total_ans.append(min_ans)\n \n new_ans = vector_AND_(ypts, total_ans)\n for i in useful_indices:\n if new_ans[i].isempty == False: # abs( new_ans[i].width() ) > 0.:\n ypts[i] = ypts[i] & new_ans[i]\n qxdot,qxddot,qydot,qyddot = self.update_allq(xpts,ypts)\n else:\n print 'warning, possible constraint violation, curvature 1'\n \n ## \n ## qxdot\n ##\n check2 = qydot*qxddot\n if check2.width() < 1.e-2 and check2.contains(0.):\n check2.inf = 0.\n check2.sup = 0.\n #if qyddot.contains(0.):\n # print 'qyddot = ',qyddot\n # print 'qyddot not invertable, implement other logic please'\n \n if qyddot.contains(0.) 
and qyddot.width()<1.e-6:\n qxdot.inf = 0.#1.e-10\n print 'invert qyddot'\n print 'qyddot = ',qyddot\n fix = (lhs + check2)*(1./qyddot)#*(qyddot**-1.)\n fix = fix & qxdot # go ahead and shrink fix to qxdot - they are logically equivalent\n total_ans = []\n useful_indices = []\n bad_indices = []\n \n for i in range(len(xpts)): #contract on x[i]\n min_ans = 0.\n for j in range(len(xpts)): # add up all jth pieces of the dot product except i\n if j==i:\n pass\n else:\n \n min_ans = (xpts[j]*float(self.localBasis[1,j] ) ) + min_ans\n min_ans = fix - min_ans\n if (abs(float(self.localBasis[1,i]) ) >0.0 ):\n min_ans = min_ans/float(self.localBasis[1,i])\n useful_indices.append(i)\n else:\n bad_indices.append(i)\n total_ans.append(min_ans)\n \n new_ans = vector_AND_(xpts, total_ans)\n for i in useful_indices:\n if not new_ans[i].isempty: # abs( new_ans[i].width() ) > 0.:\n xpts[i] = xpts[i] & new_ans[i]\n qxdot,qxddot,qydot,qyddot = self.update_allq(xpts,ypts)\n else:\n print 'warning, possible constraint violation, curvature 2'\n \n \n ## switch to the other side\n \n ##\n ## contract on qydot\n ##\n check2 = qxdot*qyddot\n if check2.width() < 1.e-2 and check2.contains(0.):\n check2.inf = 0.\n check2.sup = 0.\n# if qxddot.contains(0.):\n# print 'qxddot = ',qxddot\n# print 'qxddot not invertable, implement other logic please'\n# qxddot.min.value = 0.\n if qxddot.contains(0.):\n qxddot.inf = 0.\n \n print 'invert qxddot'\n print 'qxddot = ',qxddot\n t1 = (lhs - check2)/(-qxddot)#*(-qxddot**-1)\n t1 = t1 & qydot\n total_ans = []\n useful_indices = []\n bad_indices = []\n for i in range(len(ypts)): \n min_ans = 0.\n for j in range(len(ypts)):\n if j==i:\n pass\n else:\n #print 't1 = ',t1\n #print 'ypts[{}] = {}'.format(i,ypts[i])\n #print 'localbasis[{},{}] = {}'.format(1,i,self.localBasis[1,j])\n min_ans = (ypts[j]*float(self.localBasis[1,j])) + min_ans\n min_ans = t1 - min_ans\n if (abs(float(self.localBasis[1,i])) > 0.0):\n min_ans = min_ans/float(self.localBasis[1,i])\n useful_indices.append(i)\n else:\n bad_indices.append(i)\n total_ans.append(min_ans)\n \n new_ans = vector_AND_(ypts, total_ans)\n for i in useful_indices:\n if not new_ans[i].isempty: # abs( new_ans[i].width() ) > 0.:\n ypts[i] = ypts[i] & new_ans[i]\n else:\n print 'warning, possible constraint violation, curvature 3'\n \n ##contract on qxdot\n \n check2 = qxdot*qyddot\n if check2.width() < 1.e-2 and check2.contains(0.):\n check2.inf = 0.\n check2.sup = 0.\n #contract on qxddot\n# if qydot.contains(0.):\n# print 'qydot = ',qxddot\n# print 'qydot not invertable, implement other logic please'\n if qydot.contains(0.):\n qydot.inf = 0.\n print 'invert qydot'\n print 'qydot = ',qydot\n fix = (lhs - qxdot*qyddot)/(-qydot)#*(-qydot**-1)\n fix = fix & qxddot # go ahead and shrink t1 to quddot - they are logically equivalent\n total_ans = []\n useful_indices = []\n bad_indices = []\n for i in range(len(xpts)):\n min_ans = 0.\n for j in range(len(xpts)):\n if j==i:\n pass\n else:\n min_ans = (xpts[j]*float(self.localBasis[2,j] ) ) + min_ans\n min_ans = fix - min_ans\n if (abs(float(self.localBasis[2,i]) ) >0.0 ):\n min_ans = min_ans/float(self.localBasis[2,i])\n useful_indices.append(i)\n else:\n bad_indices.append(i)\n total_ans.append(min_ans)\n \n new_ans = vector_AND_(xpts, total_ans)\n for i in useful_indices:\n if not new_ans[i].isempty: # abs( new_ans[i].width() ) > 0.:\n xpts[i] = xpts[i] & new_ans[i]\n else:\n print 'warning, possible constraint violation, curvature 4'\n \n for i in range(nrange):\n vertices[0][i].value = 
xpts[i]\n vertices[1][i].value = ypts[i]\n return vertices",
"def check_overlapping(self, fit_radius=True, merge=True, mindist='auto', update_geometry=False):\n\n from scipy.spatial.distance import cdist\n from scipy.spatial import cKDTree\n # index = list(self.graph)[:]\n # centers = np.array(list(zip(*nx.get_node_attributes(self.graph,'center').values()))).T\n # pores_radii = np.fromiter(nx.get_node_attributes(self.graph,'radius').values(),dtype=np.float)\n\n pores_radii = list(nx.get_node_attributes(\n self.graph, 'radius').items())\n # we begin by the bigger pores\n pores_radii.sort(key=lambda tup: tup[1], reverse=True)\n index, pores_radii = zip(*pores_radii)\n pores_radii = np.array(pores_radii)\n\n centers = nx.get_node_attributes(self.graph, 'center')\n centers = [np.array(centers[i]) for i in index]\n centers = np.array(centers)\n # distances = cdist(centers,centers)\n kdtree = cKDTree(centers)\n\n stop = False\n\n while not stop:\n\n stop = True\n\n for i, n1 in enumerate(index):\n\n #distances = cdist(centers,[self.graph.nodes[n1]['center']])[:,0]\n\n if self.graph.has_node(n1):\n\n if mindist == 'auto':\n gap = self.graph.nodes[n1]['radius']*0.02\n else:\n gap = mindist\n\n labels = kdtree.query_ball_point(\n self.graph.nodes[n1]['center'], 2.5*self.graph.nodes[n1]['radius'])\n labels.remove(i)\n # distances,labels = kdtree.query(x=net.graph.nodes[n1]['center'],2*self.graph.nodes[n1]['radius'],n_jobs=1)\n # labels.remove(i)\n #distance *= 0.998\n distances = cdist(centers[labels], [self.graph.nodes[n1]['center']])[\n :, 0]*0.998\n d = distances - pores_radii[labels]\n d -= self.graph.nodes[n1]['radius']\n # On commence par la distance la plus faible\n d_and_labels = [(d[j], k) for j, k in enumerate(labels)]\n d_and_labels.sort(key=lambda t: t[0])\n\n for (dist, ind) in d_and_labels:\n\n n2 = index[ind]\n if self.graph.has_node(n2) and self.graph.has_node(n1):\n\n # Le centre du pore né est dans la sphère du pore n1 OU il y a overlapping et fit_radius == False\n # -> Merging ou suppression du pore de plus petit rayon\n if (dist + self.graph.nodes[n2]['radius'] <= gap) or (dist < gap and dist + self.graph.nodes[n2]['radius'] > gap and not fit_radius):\n\n if (self.graph.nodes[n1]['radius'] >= self.graph.nodes[n2]['radius']):\n if merge:\n self.merge_pores(n1, n2)\n print(\"pore\", n1, \"and\", n2,\n \"overlap: merging (deleting\", n2, \")\")\n else:\n self.remove_pore(n2)\n print(\"pore\", n1, \"and\", n2,\n \"overlap: deleting\", n2)\n\n else:\n if merge:\n self.merge_pores(n2, n1)\n print(\"pore\", n1, \"and\", n2,\n \"overlap: merging (deleting\", n1, \")\")\n else:\n self.remove_pore(n1)\n print(\"pore\", n1, \"and\", n2,\n \"overlap: deleting\", n2)\n # On termine l'itération car le pore n1 n'existe plus...\n break\n\n # Overlapping et fit_radius == True\n # 3 options:\n # -Le rayon du pore le plus petit est modifié\n # -Merging\n # -Suppression\n elif dist < gap and dist + self.graph.nodes[n2]['radius'] > gap and fit_radius:\n if (self.graph.nodes[n1]['radius'] >= self.graph.nodes[n2]['radius']):\n r = dist + \\\n self.graph.nodes[n2]['radius'] - \\\n self.graph.nodes[n1]['radius'] - gap\n if self.graph.nodes[n2]['radius'] >= r and r > 0:\n self.graph.nodes[n2]['radius'] = r\n pores_radii[ind] = r\n print(\n \"pore\", n1, \"and\", n2, \"overlap: changin radius of\", n2, \"to\", r)\n else:\n if merge:\n self.merge_pores(n1, n2)\n print(\n \"pore\", n1, \"and\", n2, \"overlap: merging (deleting\", n2, \")\")\n else:\n self.remove_pore(n2)\n print(\"pore\", n1, \"and\", n2,\n \"overlap: deleting\", n2)\n else:\n if 
self.graph.nodes[n1]['radius'] >= dist:\n self.graph.nodes[n1]['radius'] = dist\n pores_radii[i] = dist\n print(\n \"pore\", n1, \"and\", n2, \"overlap: changin radius of\", n1, \"to\", dist)\n else:\n if merge:\n self.merge_pores(n2, n1)\n print(\n \"pore\", n1, \"and\", n2, \"overlap: merging (deleting\", n1, \")\")\n else:\n self.remove_pore(n1)\n print(\"pore\", n1, \"and\", n2,\n \"overlap: deleting\", n1)\n # On termine l'itération car le pore n1 n'existe plus...\n break\n\n if update_geometry:\n self.set_auto_throats_length()\n self.set_auto_throats_radius()",
"def _constraints_other(self):\n pass",
"def add_corridor_constraint(self,seg,r,weight=1.0):\n\n constraint_type = \"cylinder\"\n params = dict()\n params['x1'] = np.array([ self.qr_polytraj.waypoints['x'][0,seg],\n self.qr_polytraj.waypoints['y'][0,seg],\n self.qr_polytraj.waypoints['z'][0,seg]])\n params['x2'] = np.array([ self.qr_polytraj.waypoints['x'][0,seg+1],\n self.qr_polytraj.waypoints['y'][0,seg+1],\n self.qr_polytraj.waypoints['z'][0,seg+1]])\n params['der'] = 0\n params['l'] = r # Give the same radius buffer on the end caps\n params['r'] = r\n params['weight'] = weight\n params['keep_out'] = False\n params['active_seg'] = seg\n\n\n self.qr_polytraj.add_constraint(constraint_type,params,dynamic_weighting=False,sum_func=False)",
"def _constraints_external(self):\n pass",
"def __init__(self, minRA, maxRA, minDec, maxDec, radius_RA, radius_Dec):\n\n self.RA = np.mean([minRA, maxRA])\n self.Dec = np.mean([minDec, maxDec])\n self.radius_RA = radius_RA\n self.radius_Dec = radius_Dec\n\n # define the polygon attached to this area\n \"\"\"\n self.area_poly = areap(self.RA-radius_RA/2.,\n self.RA+radius_RA/2.,\n self.Dec-radius_Dec/2.,\n self.Dec+radius_Dec/2.)\n \"\"\"\n self.area_poly = areap(minRA, maxRA, minDec, maxDec)\n all_patches = self.getpatches(minRA, maxRA, minDec, maxDec)\n\n self.patches = self.inside(all_patches)",
"def _constraints_utility(self):\n\n def rule(model):\n total = summation(self.utilities, model.A)\n return model.A_total == total\n\n self.model.constrain_A_total = Constraint(rule=rule)\n\n def rule(model):\n total = 2 * summation(self.utilities, model.A2)\n return model.A2_total == total\n\n self.model.constrain_A2_total = Constraint(rule=rule)\n\n def rule(model):\n total = 3 * summation(self.utilities, model.A3)\n return model.A3_total == total\n\n self.model.constrain_A3_total = Constraint(rule=rule)\n\n def rule(model):\n total = 4 * summation(self.utilities, model.A4)\n return model.A4_total == total\n\n self.model.constrain_A4_total = Constraint(rule=rule)\n\n def rule(model):\n completion_bonus = self.task_completion_bonus * self.task_duration\n total = summation(completion_bonus, model.T_total)\n return model.Completion_total == total\n\n self.model.constrain_completion_total = Constraint(rule=rule)\n\n def rule(model):\n scaling = 0.2\n affinity = np.outer(c.AFFINITY_COGNITIVE, self.task_cognitive_load)\n\n # TODO(cathywu) replace this code when \"simple slicing\" is clarified\n zeros1 = np.zeros((1, self.num_tasks))\n zeros2 = np.zeros((2, self.num_tasks))\n zeros3 = np.zeros((3, self.num_tasks))\n\n total = summation(affinity, model.A)\n total += summation(affinity, model.A2)\n total += summation(affinity, model.A3)\n total += summation(affinity, model.A4)\n\n total += summation(np.vstack((affinity[1:, :], zeros1)), model.A2)\n total += summation(np.vstack((affinity[1:, :], zeros1)), model.A3)\n total += summation(np.vstack((affinity[1:, :], zeros1)), model.A4)\n\n total += summation(np.vstack((affinity[2:, :], zeros2)), model.A3)\n total += summation(np.vstack((affinity[2:, :], zeros2)), model.A4)\n\n total += summation(np.vstack((affinity[3:, :], zeros3)), model.A4)\n total *= scaling\n\n return model.Affinity_cognitive_total == total\n\n self.model.constrain_affinity_cognitive_total = Constraint(rule=rule)",
"def horizontal_radius(self):\n raise NotImplementedError",
"def __init__(self,r):\n self.radius = r\n self.uc_centered_a = r\n self.uc_centered_b = r*np.sqrt(3.0)",
"def radius(self,xc=None,yc=None):\n if xc == None:\n xc = self.x1\n if yc == None:\n yc = self.y1\n self.r = sqrt((self.x-xc)**2+(self.y-yc)**2)",
"def define_potential(self) -> hoomd.md.pair.pair:\n self.potential_args.setdefault('r_cut', 2.5)\n potential = self.potential(\n **self.potential_args,\n nlist=hoomd.md.nlist.cell()\n )\n for i, j in combinations_with_replacement(self._radii.keys(), 2):\n potential.pair_coeff.set(i, j, epsilon=1, sigma=self._radii[i] + self._radii[j])\n return potential",
"def __init__(self, radius):\n self.radius = radius",
"def build_constraints_boundaries(self):\n\n # Trapezoidal and Hermite-Simpson methods can't compute\n # defects at the last node contrary to pseudospectral methods\n coll_method = self.options['tr_method'] in [\n 'trapezoidal', 'hermite-simpson']\n n_nodes = self.problem.prm['n_nodes'] - \\\n 1 if coll_method else self.problem.prm['n_nodes']\n\n # Defects lower and upper boundaries\n defects_low = np.zeros(\n self.problem.prm['n_states'] * n_nodes)\n defects_upp = np.zeros(\n self.problem.prm['n_states'] * n_nodes)\n\n # Path lower and upper boundaries\n path_low = np.hstack([self.problem.low_bnd.path]\n * (self.problem.prm['n_nodes']))\n path_upp = np.hstack([self.problem.upp_bnd.path]\n * (self.problem.prm['n_nodes']))\n\n # Events lower and upper boundaries\n event_low = self.problem.low_bnd.event\n event_upp = self.problem.upp_bnd.event\n\n # Assembly of the lower and upper boundaries vectors\n low = np.concatenate((defects_low, path_low, event_low))\n upp = np.concatenate((defects_upp, path_upp, event_upp))\n\n return low, upp",
"def fix_curvature(self) -> None:\n self.n1.fix = True\n self.n2.fix = True",
"def get_radius(self):\r\n return 1",
"def constraints(self):\n constraints = np.concatenate( (np.ravel(self.noise_var_constraint), \n self.kern.constraints), axis=0)\n return constraints",
"def __addValueConstraints(self):\n for x in range(self.width):\n for y in range(self.height):\n g = self.grid[(x, y)]\n self.solver.add(\n Or([g == Magnets.EMPTY, g == Magnets.PLUS, g == Magnets.MINUS]))\n if x > 0:\n left = self.grid[(x-1, y)]\n self.solver.add(Or([g != left, g == Magnets.EMPTY]))\n if y > 0:\n up = self.grid[(x, y-1)]\n self.solver.add(Or([g != up, g == Magnets.EMPTY]))",
"def __init__(self, constraint):\n self.__filter__ = []\n if constraint.get('range', None):\n constraint_range = constraint['range']\n range_start = constraint_range.get('start', None)\n range_end = constraint_range.get('end', None)\n if range_start:\n self.__filter__.append(lambda x: x >= float(range_start))\n if range_end:\n self.__filter__.append(lambda x: x <= float(range_end))\n if bool(constraint.get('unique', None)):\n self.__seen__ = {}\n\n def unique_check(x):\n if self.__seen__.get(x, None) != None:\n return False\n else:\n self.__seen__[x] = True\n return True\n self.__filter__.append(unique_check)\n self.__allow_null__ = bool(constraint.get('allow_null', None))",
"def validation(self):\r\n\r\n if self.__radius <= 0:\r\n raise ValueError(\"the input radius must be a positive number\")"
] |
[
"0.57216144",
"0.5593304",
"0.5487926",
"0.5459716",
"0.5433389",
"0.5411653",
"0.5399157",
"0.529492",
"0.5289383",
"0.528124",
"0.52704936",
"0.52636516",
"0.5226623",
"0.52207905",
"0.52052534",
"0.51941854",
"0.51933926",
"0.5185518",
"0.51653844",
"0.51490223",
"0.5141201",
"0.5120843",
"0.51196635",
"0.5117338",
"0.5112763",
"0.5112375",
"0.51065964",
"0.50865054",
"0.5068361",
"0.50505036"
] |
0.6335594
|
0
|
Returns true if provenance of property is SPE or AST
|
def use_property(kepid, prop):
try:
prov = kicu.DATA.ix[kepid, '{}_prov'.format(prop)]
return any([prov.startswith(s) for s in ['SPE', 'AST']])
except KeyError:
raise MissingStellarError('{} not in stellar table?'.format(kepid))
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def isprop(v):\n return isinstance(v, property)",
"def isproperty(object):\n return isinstance(object, property)",
"def is_simple(self):\n return self.propertyValueType.lower() in ('float', 'double',\n 'int', 'integer',\n 'string')",
"def isSemantics(self):\n return _libsbml.ASTNode_isSemantics(self)",
"def is_psionic(self) -> bool:\n return ATTRIBUTE.Psionic.value in self.type_data.attributes",
"def species_has_sp(species_output_dict: dict) -> bool:\n if species_output_dict['paths']['sp'] or species_output_dict['paths']['composite']:\n return True\n return False",
"def is_vespene_geyser(self) -> bool:\n return self.type_data.has_vespene",
"def is_prop_symbol(s):\n return is_symbol(s) and s[0].isupper() and s != 'TRUE' and s != 'FALSE'",
"def can_prove(self, target):\n return self.prop == target.prop and set(self.hyps).issubset(set(target.hyps))",
"def check_thm_type(self):\n for t in list(self.hyps) + [self.prop]:\n if t.checked_get_type() != BoolType:\n raise term.TypeCheckException('expect boolean type for propositions')",
"def has_expression(self):\n return self._expression is not None",
"def is_procedure(vba_object):\n if hasattr(vba_object, 'statements'):\n return True\n else:\n return False",
"def test_should_return_appropriate_type(self):\r\n assert isinstance(self.spec_parser.parse_statement(self.edge_spec), Edge)\r\n assert isinstance(self.spec_parser.parse_statement(self.property_spec), Property)",
"def is_plotable(self):\n return self.propertyValueType.lower() in ('float', 'double',\n 'int', 'integer')",
"def isComputed(self) -> bool:\n ...",
"def is_expression(self):\r\n return conf.lib.clang_isExpression(self)",
"def is_apt(self):\r\n return self.has_label('apt')",
"def is_proved(self):\n return len(self.proofs) > 0",
"def is_variant(self):\n return bool(self.gt_type)",
"def __eq__(self, obj: \"Property\") -> bool:\n return self.name == obj.name and self.property_type == obj.property_type",
"def process_property(self, prop):\n NifLog.warn(f\"Unknown property block found : {prop.name}\")\n NifLog.warn(f\"This type isn't currently supported: {type(prop)}\")",
"def has_path_source(self) -> bool:\n\n return any(self.is_path_type(x) for x in self.parameters)",
"def is_P(self):\n return isinstance(self,P)",
"def is_equals(self):\n return self.prop.is_equals()",
"def match(self, proof: dict) -> bool:\n return proof.get(\"proofPurpose\") == self.term",
"def _is_simple_type(cls):\n return all([\n AnnotationWrapper(anno).is_simple_in_opt_and_not_opt\n for anno in cls._used_annotations()\n ])",
"def hasVeryTrustedValue(self):\n return self.subnode_source.hasVeryTrustedValue()",
"def is_indexed_or_named_property_operation(self):\n return self.is_getter or self.is_setter or self.is_deleter",
"def parsable_as_expression(self):\n return self.parsable and self.expression_ast_node is not None",
"def promoter(self):\n return self.mut['ProMutation'] is None"
] |
[
"0.6570999",
"0.6159433",
"0.562184",
"0.5580678",
"0.55424774",
"0.54955554",
"0.54840237",
"0.5463147",
"0.5448263",
"0.53668535",
"0.5354428",
"0.53308886",
"0.5306961",
"0.53069216",
"0.5291205",
"0.52911955",
"0.52770704",
"0.52570504",
"0.5237203",
"0.5223409",
"0.5189958",
"0.51840895",
"0.5181425",
"0.51621175",
"0.5155834",
"0.5142169",
"0.51398504",
"0.51282984",
"0.5108469",
"0.50997794"
] |
0.6637285
|
0
|
returns star config object for given KOI
|
def star_config(koi, bands=['g','r','i','z','J','H','K'],
unc=dict(g=0.05, r=0.05, i=0.05, z=0.05,
J=0.02, H=0.02, K=0.02), **kwargs):
folder = os.path.join(KOI_FPPDIR, ku.koiname(koi))
if not os.path.exists(folder):
os.makedirs(folder)
config = ConfigObj(os.path.join(folder,'star.ini'))
koi = ku.koiname(koi)
maxAV = koi_maxAV(koi)
config['maxAV'] = maxAV
mags = ku.KICmags(koi)
for band in bands:
if not np.isnan(mags[band]):
config[band] = (mags[band], unc[band])
config['Kepler'] = mags['Kepler']
kepid = KOIDATA.ix[koi,'kepid']
if use_property(kepid, 'teff'):
teff, e_teff = (kicu.DATA.ix[kepid, 'teff'],
kicu.DATA.ix[kepid, 'teff_err1'])
if not any(np.isnan([teff, e_teff])):
config['Teff'] = (teff, e_teff)
if use_property(kepid, 'logg'):
logg, e_logg = (kicu.DATA.ix[kepid, 'logg'],
kicu.DATA.ix[kepid, 'logg_err1'])
if not any(np.isnan([logg, e_logg])):
config['logg'] = (logg, e_logg)
if use_property(kepid, 'feh'):
feh, e_feh = (kicu.DATA.ix[kepid, 'feh'],
kicu.DATA.ix[kepid, 'feh_err1'])
if not any(np.isnan([feh, e_feh])):
config['feh'] = (feh, e_feh)
for kw,val in kwargs.items():
config[kw] = val
return config
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def fpp_config(koi, **kwargs):\n folder = os.path.join(KOI_FPPDIR, ku.koiname(koi))\n if not os.path.exists(folder):\n os.makedirs(folder)\n config = ConfigObj(os.path.join(folder,'fpp.ini'))\n\n koi = ku.koiname(koi)\n\n rowefit = jrowe_fit(koi)\n\n config['name'] = koi\n ra,dec = ku.radec(koi)\n config['ra'] = ra\n config['dec'] = dec\n config['rprs'] = rowefit.ix['RD1','val']\n config['period'] = rowefit.ix['PE1', 'val']\n\n config['starfield'] = kepler_starfield_file(koi)\n\n for kw,val in kwargs.items():\n config[kw] = val\n\n config['constraints'] = {}\n config['constraints']['maxrad'] = default_r_exclusion(koi)\n try:\n config['constraints']['secthresh'] = pipeline_weaksec(koi)\n except NoWeakSecondaryError:\n pass\n\n return config",
"def get_pixis_config_object(env, src):\n cfg = env.configStore()\n\n o = cfg.get(_psana.Pixis.ConfigV1, src)\n if o is not None: return o\n\n return None",
"def get_confg(self):\n\n ini = ConfigParser()\n self.config_parser = ini\n # if isinstance(cfile, (file, StringIO.StringIO, io.BytesIO)):\n if isinstance(self.config_data, str) and self.config_data:\n fp = io.BytesIO(self.config_data)\n ini.readfp(fp)\n elif self.config_file is not None:\n ini.read([self.config_file, os.path.expanduser('~/.' + self.config_file)])\n\n if ini.has_section('whoshere'):\n return ini.items('whoshere')\n\n return {}",
"def get_istar_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.iStar.ConfigV1, src)\n if o is not None: return o\n\n return None",
"def cg_config():\n return {}",
"def get_epix10ka_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.Epix.Config10kaV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10kaV1, src)\n if o is not None: return o\n\n return None",
"def config(self):\n annotations = IAnnotations(self.context)\n return annotations.get(CONFIGURATION_KEY, {})",
"def get_opal1k_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.Opal1k.ConfigV1, src)\n if o is not None: return o\n\n return None",
"def get_epix10ka_any_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.Epix.Config10ka2MV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10ka2MV1, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10kaQuadV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10kaQuadV1, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10kaV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10kaV1, src)\n if o is not None: return o\n\n return None",
"def get_config(self):\n return {}",
"def get_iAF1260b_config():\n package_path = get_package_path()\n metabolism_file = os.path.join(package_path, 'bigg_models', 'iAF1260b.json')\n return {'model_path': metabolism_file}",
"def config(self):\n return {}",
"def get_config(self):\n if self.allow_reco():\n return self.chs_config()\n else:\n return self.get_config_j(self.id)",
"def get_epix_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.Epix.Config100aV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config100aV1, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10ka2MV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10ka2MV1, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10kaQuadV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10kaQuadV1, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10kaV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10kaV1, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10KV1, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.ConfigV1, src)\n if o is not None: return o\n\n return None",
"def get_epix10ka2m_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.Epix.Config10ka2MV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10ka2MV1, src)\n if o is not None: return o\n\n return None",
"def get_config_object() -> \"BaseConfig\":\n assert (\n len(G_CONFIG_OBJECT) == 1\n ), \"Have you created quantize config object before calling `quantize_model`?\"\n if G_CONFIG_OBJECT:\n return G_CONFIG_OBJECT[0]",
"def get_acqiris_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.Acqiris.ConfigV1, src)\n if o is not None: return o\n\n return None",
"def configuration():",
"def istio_config(self) -> Optional[pulumi.Input['IstioConfigArgs']]:\n return pulumi.get(self, \"istio_config\")",
"def get_config_template(self) -> cconfig.Config:",
"def get_uxi_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.Uxi.ConfigV1, src)\n if o is not None: return o\n\n return None",
"def get_config(self):\n return {'name': self.name, 'dtype': self.dtype}",
"def get_config(self):\n return {\"name\": self.name, \"tunable\": self.tunable}",
"def get_princeton_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.Princeton.ConfigV5, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Princeton.ConfigV4, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Princeton.ConfigV3, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Princeton.ConfigV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Princeton.ConfigV1, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Pimax.ConfigV1, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Pixis.ConfigV1, src)\n if o is not None: return o\n\n return None",
"def config(self):\n return self.namespace['config']",
"def get_default_config(self):\n if not self.iface_type:\n return None\n\n defaults = {}\n defaults['description'] = self.interface_name + ' Interface'\n defaults['admin'] = 'up'\n if self.is_ethernet:\n defaults['speed'] = 'auto'\n defaults['duplex'] = 'auto'\n defaults['type'] = 'bridged'\n elif self.iface_type == 'Bridge-Aggregation':\n defaults['type'] = 'bridged'\n else:\n defaults['type'] = 'routed'\n\n return defaults",
"def get_e_coli_core_config():\n package_path = get_package_path()\n metabolism_file = os.path.join(package_path, 'bigg_models', 'e_coli_core.json')\n return {'model_path': metabolism_file}",
"def _get_MindtPy_GOA_config():\n CONFIG = ConfigBlock('MindtPy-GOA')\n\n _add_common_configs(CONFIG)\n _add_goa_configs(CONFIG)\n _add_oa_cuts_configs(CONFIG)\n _add_subsolver_configs(CONFIG)\n _add_tolerance_configs(CONFIG)\n _add_bound_configs(CONFIG)\n return CONFIG",
"def config(self) -> Dict[str, Any]:",
"def _get_config(self):\n return self.__config"
] |
[
"0.60090053",
"0.5942401",
"0.5920423",
"0.5912436",
"0.5829198",
"0.55174524",
"0.55119944",
"0.54936326",
"0.5490124",
"0.5460084",
"0.54068804",
"0.5390276",
"0.5330812",
"0.5309186",
"0.53012615",
"0.52819467",
"0.52716434",
"0.526362",
"0.52570665",
"0.52288973",
"0.5213522",
"0.5205488",
"0.5160786",
"0.5146575",
"0.5120113",
"0.5111574",
"0.51110494",
"0.5104246",
"0.51017493",
"0.508643"
] |
0.6899527
|
0
|
returns config object for given KOI
|
def fpp_config(koi, **kwargs):
folder = os.path.join(KOI_FPPDIR, ku.koiname(koi))
if not os.path.exists(folder):
os.makedirs(folder)
config = ConfigObj(os.path.join(folder,'fpp.ini'))
koi = ku.koiname(koi)
rowefit = jrowe_fit(koi)
config['name'] = koi
ra,dec = ku.radec(koi)
config['ra'] = ra
config['dec'] = dec
config['rprs'] = rowefit.ix['RD1','val']
config['period'] = rowefit.ix['PE1', 'val']
config['starfield'] = kepler_starfield_file(koi)
for kw,val in kwargs.items():
config[kw] = val
config['constraints'] = {}
config['constraints']['maxrad'] = default_r_exclusion(koi)
try:
config['constraints']['secthresh'] = pipeline_weaksec(koi)
except NoWeakSecondaryError:
pass
return config
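A minimal usage sketch for the helper above; the KOI name and the photfile keyword override are hypothetical, and it assumes the same ConfigObj/KOI-catalog environment the snippet itself relies on:

config = fpp_config('K00087.01', photfile='kplr_transit.txt')  # hypothetical KOI and keyword override
config.write()  # ConfigObj persists back to the fpp.ini path it was opened with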
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_confg(self):\n\n ini = ConfigParser()\n self.config_parser = ini\n # if isinstance(cfile, (file, StringIO.StringIO, io.BytesIO)):\n if isinstance(self.config_data, str) and self.config_data:\n fp = io.BytesIO(self.config_data)\n ini.readfp(fp)\n elif self.config_file is not None:\n ini.read([self.config_file, os.path.expanduser('~/.' + self.config_file)])\n\n if ini.has_section('whoshere'):\n return ini.items('whoshere')\n\n return {}",
"def config(self):\n annotations = IAnnotations(self.context)\n return annotations.get(CONFIGURATION_KEY, {})",
"def get_epix10ka_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.Epix.Config10kaV2, src)\n if o is not None: return o\n\n o = cfg.get(_psana.Epix.Config10kaV1, src)\n if o is not None: return o\n\n return None",
"def get_opal1k_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.Opal1k.ConfigV1, src)\n if o is not None: return o\n\n return None",
"def get_config(self):\n if self.allow_reco():\n return self.chs_config()\n else:\n return self.get_config_j(self.id)",
"def get_config(self):\n return {\"name\": self.name, \"tunable\": self.tunable}",
"def get_config(self):\n return {}",
"def star_config(koi, bands=['g','r','i','z','J','H','K'],\n unc=dict(g=0.05, r=0.05, i=0.05, z=0.05,\n J=0.02, H=0.02, K=0.02), **kwargs):\n folder = os.path.join(KOI_FPPDIR, ku.koiname(koi))\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n config = ConfigObj(os.path.join(folder,'star.ini'))\n\n koi = ku.koiname(koi)\n\n maxAV = koi_maxAV(koi)\n config['maxAV'] = maxAV\n\n mags = ku.KICmags(koi)\n for band in bands:\n if not np.isnan(mags[band]):\n config[band] = (mags[band], unc[band])\n config['Kepler'] = mags['Kepler']\n\n kepid = KOIDATA.ix[koi,'kepid']\n\n if use_property(kepid, 'teff'):\n teff, e_teff = (kicu.DATA.ix[kepid, 'teff'],\n kicu.DATA.ix[kepid, 'teff_err1'])\n if not any(np.isnan([teff, e_teff])):\n config['Teff'] = (teff, e_teff)\n\n if use_property(kepid, 'logg'):\n logg, e_logg = (kicu.DATA.ix[kepid, 'logg'],\n kicu.DATA.ix[kepid, 'logg_err1'])\n if not any(np.isnan([logg, e_logg])):\n config['logg'] = (logg, e_logg)\n\n if use_property(kepid, 'feh'):\n feh, e_feh = (kicu.DATA.ix[kepid, 'feh'],\n kicu.DATA.ix[kepid, 'feh_err1'])\n if not any(np.isnan([feh, e_feh])):\n config['feh'] = (feh, e_feh)\n\n for kw,val in kwargs.items():\n config[kw] = val\n\n return config",
"def get_config_template(self) -> cconfig.Config:",
"def config():\n return _config",
"def get_uxi_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.Uxi.ConfigV1, src)\n if o is not None: return o\n\n return None",
"def get_acqiris_config_object(env, src):\n cfg = env.configStore()\n o = cfg.get(_psana.Acqiris.ConfigV1, src)\n if o is not None: return o\n\n return None",
"def istio_config(self) -> Optional[pulumi.Input['IstioConfigArgs']]:\n return pulumi.get(self, \"istio_config\")",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config",
"def _get_config(self):\n return self.__config"
] |
[
"0.64626646",
"0.6120666",
"0.6065999",
"0.6041188",
"0.6032176",
"0.599006",
"0.5987609",
"0.5959099",
"0.59461635",
"0.59364283",
"0.5936055",
"0.59172744",
"0.59112185",
"0.5904476",
"0.5904476",
"0.5904476",
"0.5904476",
"0.5904476",
"0.5904476",
"0.5904476",
"0.5904476",
"0.5904476",
"0.5904476",
"0.5904476",
"0.5904476",
"0.5904476",
"0.5904476",
"0.5904476",
"0.5904476",
"0.5904476"
] |
0.6510853
|
0
|
Predict a single batch of images, optionally with augmentation. Augmentations vectorized across the entire batch and predictions averaged.
|
def predict_batch(self, imgs_batch, augment=False):
if augment:
aug_funcs = [
lambda x: x, # identity
                lambda x: x[:, ::-1, ...],  # vflip
lambda x: x[:, :, ::-1], # hflip
lambda x: np.rot90(x, 1, axes=(1, 2)), # +90
lambda x: np.rot90(x, 2, axes=(1, 2)), # +180
lambda x: np.rot90(x, 3, axes=(1, 2)), # +270
lambda x: np.rot90(x, 1, axes=(1, 2))[:, ::-1, ...], # vflip(+90)
                lambda x: np.rot90(x, 1, axes=(1, 2))[:, :, ::-1]  # hflip(+90)
]
yp = np.zeros((imgs_batch.shape[0], len(TAGS)))
for aug_func in aug_funcs:
imgs_batch = aug_func(imgs_batch)
tags_batch = self.net.predict(imgs_batch)
yp += tags_batch / len(aug_funcs)
return yp
else:
return self.net.predict_on_batch(imgs_batch)
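For reference, a hedged usage sketch of the method above; the batch shape and the model instance name are assumptions, not taken from the source:

imgs_batch = np.random.rand(8, 256, 256, 4).astype(np.float32)  # assumed (batch, H, W, C) layout
probs = model.predict_batch(imgs_batch, augment=True)           # averaged over the 8 augmentations
assert probs.shape == (8, len(TAGS))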
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def predict(self, images, batch_size):\n pass",
"def warmup_predict(model, imgs, Npred):\n H = augmented_state_matrix(model[:-1], imgs, 0)\n h0 = H[-2]\n y0 = imgs[-1]\n return predict(model, y0, h0, Npred)",
"def predict_on_batch(self, input_batch):\n from deeplift.util import run_function_in_batches\n from deeplift.util import compile_func\n x_standardized = self.model._batch_to_list(input_batch)\n if self.fwd_predict_fn is None:\n # TODO: Once DeepLIFT layer annotation works integrate it here too:\n \"\"\"\n # identify model output layers:\n self.output_layers_idxs = []\n for output_name in self.model.model.output_names:\n for i, l in enumerate(self.model.model.layers):\n if l.name == output_name:\n self.output_layers_idxs.append(i)\n \"\"\"\n inputs = [self.deeplift_model.get_layers()[i].get_activation_vars()\n for i in self.input_layer_idxs]\n outputs = [self.deeplift_model.get_layers()[i].get_activation_vars()\n for i in self.output_layers_idxs]\n self.fwd_predict_fn = compile_func(inputs, outputs)\n\n preds = run_function_in_batches(\n input_data_list=x_standardized,\n func=self.fwd_predict_fn,\n batch_size=self.batch_size,\n progress_update=None)\n\n preds = np.array(preds)\n if len(self.output_layers_idxs) == 1:\n preds = preds[0, ...]\n\n return preds",
"def predict(self, X, pred_batch_size=None):",
"def batched_predict(model, batcher, batch_size, int_mapped_X, doc_labels):\n # Intialize batcher but dont shuffle.\n train_batcher = batcher(full_X=int_mapped_X, full_y=doc_labels,\n batch_size=batch_size, shuffle=False)\n preds = []\n for batch_X, _ in train_batcher.next_batch():\n batch_preds = model.predict(batch_X=batch_X)\n preds.append(batch_preds)\n preds = np.hstack(preds)\n return preds",
"def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> np.ndarray:\n raise NotImplementedError",
"def predict(self):\n batch = get_predict_batch(1, num_rec_out=self.num_test_rec)\n self.g_model.test_batch(\n batch, self.global_step, num_rec_out=self.num_test_rec)",
"def model_predict(img, model, preprocess_func):\n img = img.resize((224, 224)) # Each model expects shape: (224, 224, 3)\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n\n x = preprocess_func(x)\n preds = model.predict(x)\n return preds",
"def predict_all(self, imgs):\n return self._predict(imgs)",
"def predict_on_batch(engine, batch):\n\t\tengine.model.eval()\n\t\tengine.model.rpn.nms_thresh = 0.3\n\t\twith torch.no_grad():\n\t\t\timgs, target = prepare_batch(batch, device=get_device(engine.model))\n\t\t\ty_pred = engine.model(imgs)\n\t\treturn y_pred, target",
"def predict(self, dataset, transformers=[]):\n y_preds = []\n n_tasks = self.get_num_tasks()\n ind = 0\n\n for (X_batch, _, _, ids_batch) in dataset.iterbatches(\n self.batch_size, deterministic=True):\n n_samples = len(X_batch)\n y_pred_batch = self.predict_on_batch(X_batch)\n # Discard any padded predictions\n y_pred_batch = y_pred_batch[:n_samples]\n y_pred_batch = np.reshape(y_pred_batch, (n_samples, n_tasks))\n y_pred_batch = undo_transforms(y_pred_batch, transformers)\n y_preds.append(y_pred_batch)\n y_pred = np.vstack(y_preds)\n\n # The iterbatches does padding with zero-weight examples on the last batch.\n # Remove padded examples.\n n_samples = len(dataset)\n y_pred = np.reshape(y_pred, (n_samples, n_tasks))\n # Special case to handle singletasks.\n if n_tasks == 1:\n y_pred = np.reshape(y_pred, (n_samples,))\n return y_pred",
"def predict(self, inputs: Tuple[Tensor], batch_img_metas: List[dict],\n test_cfg: ConfigType):\n if self.use_se_loss:\n seg_logits = self.forward(inputs)[0]\n else:\n seg_logits = self.forward(inputs)\n return self.predict_by_feat(seg_logits, batch_img_metas)",
"def infer(self, x, batch_size=None, **kwargs):\n if not batch_size:\n batch_size = self.batch_size\n return self.model.predict(x, batch_size, **kwargs)",
"def predict(predict_var, x_unlabeled, inputs, batch_sizes, view_size):\n x = x_unlabeled\n\n # calculate batches for predict loop\n unlabeled_batch_size = batch_sizes.get(\"Embedding\", 0)\n batch_size = min(len(x[0]), unlabeled_batch_size)\n batches = make_batches(len(x[0]), batch_size)\n\n y_preds = []\n # predict over all points\n for j, (batch_start, batch_end) in enumerate(batches):\n feed_dict = {K.learning_phase(): 0}\n # feed corresponding input for each input_type\n for input_type, input_placeholder in inputs.items():\n if input_type == \"Embedding\":\n for i in range(view_size):\n feed_dict[input_placeholder[i]] = x[i][batch_start:batch_end]\n elif input_type == \"Orthogonal\":\n batch_ids = np.random.choice(\n len(x), size=min(len(x), batch_sizes[input_type]), replace=False\n )\n for i in range(view_size):\n feed_dict[input_placeholder[i]] = x[i][batch_ids]\n else:\n raise Exception(\"Unrecognized feed name ['{}']\".format(input_type))\n # evaluate the batch\n y_pred_batch = np.asarray(K.get_session().run(predict_var, feed_dict=feed_dict))\n y_preds.append(y_pred_batch)\n y_list = np.concatenate(y_preds, axis=1)\n\n return y_list",
"def predict(model, img, target_size=(229, 229)): #fixed size for InceptionV3 architecture\r\n if img.size != target_size:\r\n img = img.resize(target_size)\r\n\r\n x = image.img_to_array(img)\r\n x = np.expand_dims(x, axis=0)\r\n x = preprocess_input(x)\r\n preds = model.predict(x)\r\n return preds[0]",
"def predict(self, x, logits=False, batch_size=128):\n\n # Apply defences\n x_preproc = self._apply_processing(x)\n x_preproc, _ = self._apply_defences(x_preproc, None, fit=False)\n\n # Run predictions with batching\n preds = np.zeros((x_preproc.shape[0], self.nb_classes))\n for batch_index in range(int(np.ceil(x_preproc.shape[0] / float(batch_size)))):\n begin, end = batch_index * batch_size, min((batch_index + 1) * batch_size, x_preproc.shape[0])\n preds[begin:end] = self._preds([x_preproc[begin:end]])[0]\n\n if not logits and not self._custom_activation:\n exp = np.exp(preds[begin:end] - np.max(preds[begin:end], axis=1, keepdims=True))\n preds[begin:end] = exp / np.sum(exp, axis=1, keepdims=True)\n\n return preds",
"def predict_batch(self, model, context, data=None):\n pass",
"def predict(self,Xpred, nsamples=2000, tune=100, progress=True, points2=[]):\n if self.type_y=='affine':\n return self.predict_affine(Xpred, nsamples, tune, progress, points2)\n elif self.type_y=='regression':\n return self.predict_regression(Xpred, nsamples, tune, progress, points2)\n elif self.type_y=='mixed':\n return self.predict_mixed(Xpred, nsamples, tune, progress, points2)",
"def predict_step(self, *args: Any, **kwargs: Any) -> Tensor:\n batch = args[0]\n x = batch[\"image\"]\n y_hat: Tensor = self(x).softmax(dim=1)\n return y_hat",
"def predict_dataset(filenames, path, model, model_preprocess_function):\n y_predicted = []\n batch_size = 32\n batch = []\n for filename in filenames:\n batch.append(preprocess(path+filename, model_preprocess_function))\n if len(batch) >= batch_size:\n y_predicted = y_predicted + model.predict(np.array(batch)).tolist()\n batch = []\n y_predicted = y_predicted + model.predict(np.array(batch)).tolist()\n return y_predicted",
"def predict(model, images):\n return model.predict_classes(images)",
"def predict(self, inputs, oversample=True):\r\n # Scale to standardize input dimensions.\r\n input_ = np.zeros((len(inputs),\r\n self.image_dims[0],\r\n self.image_dims[1],\r\n inputs[0].shape[2]),\r\n dtype=np.float32)\r\n print inputs[0].shape\r\n print input_.shape\r\n for ix, in_ in enumerate(inputs):\r\n input_[ix] = caffe.io.resize_image(in_, self.image_dims)\r\n\r\n # if oversample:\r\n # # Generate center, corner, and mirrored crops.\r\n # input_ = caffe.io.oversample(input_, self.crop_dims)\r\n # else:\r\n # # Take center crop.\r\n # center = np.array(self.image_dims) / 2.0\r\n # crop = np.tile(center, (1, 2))[0] + np.concatenate([\r\n # -self.crop_dims / 2.0,\r\n # self.crop_dims / 2.0\r\n # ])\r\n # crop = crop.astype(int)\r\n # input_ = input_[:, crop[0]:crop[2], crop[1]:crop[3], :]\r\n\r\n # Classify\r\n caffe_in = np.zeros(np.array(input_.shape)[[0, 3, 1, 2]],\r\n dtype=np.float32)\r\n for ix, in_ in enumerate(input_):\r\n caffe_in[ix] = self.transformer.preprocess(self.inputs[0], in_)\r\n out = self.forward_all(**{self.inputs[0]: caffe_in})\r\n predictions = out[self.outputs[0]]\r\n\r\n # # For oversampling, average predictions across crops.\r\n # if oversample:\r\n # predictions = predictions.reshape((len(predictions) / 10, 10, -1))\r\n # predictions = predictions.mean(1)\r\n\r\n return predictions",
"def predict(model, img, target_size):\n if img.size != target_size:\n img = img.resize(target_size)\n\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n preds = model.predict(x)\n return preds[0]",
"def predict(model, img, target_size):\n if img.size != target_size:\n img = img.resize(target_size)\n\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n preds = model.predict(x)\n return preds[0]",
"def predict(self, x, **kwargs):\n kwargs = self.filter_sk_params(Sequential.predict, kwargs)\n return np.squeeze(self.model.predict(x, **kwargs))",
"def predict(trained_model, X_test, y_test, image_name):\n if MODEL == 1:\n return predict_1(trained_model, X_test, y_test)\n elif MODEL == 3:\n if CROSS_VALIDATION:\n return cv_predict_3(trained_model, X_test, y_test)\n else:\n return predict_3(trained_model, X_test, y_test, image_name)\n elif MODEL == 2:\n return predict_2(trained_model, X_test, y_test)\n else:\n # For models 4, 5 and 6\n return predict_4(trained_model, X_test, y_test)",
"def predict_batches(model, X, batchsize=None):\n if batchsize is None:\n batchsize = model.flags.bs\n pred = []\n for batch in grouper(X, batchsize):\n pred.append(model.predict(np.array(batch)))\n\n return np.concatenate(pred)",
"def prediction(self, X):\n images = self.preprocess_images(X)\n return self.model.predict(images)",
"def predict(self):\n self.predicted_test_summary = []\n for step in xrange(0, self.test_size // self.test_batch_size):\n print 'Predicting Batch No.:', step\n offset = (step * self.test_batch_size) % self.test_size\n batch_data_fwd = self.X_tst_fwd[offset:(offset + self.test_batch_size), :].T\n batch_data_bwd = self.X_tst_bwd[offset:(offset + self.test_batch_size), :].T\n summary_test_out = self._predict_batch(batch_data_fwd, batch_data_bwd)\n self.predicted_test_summary.extend(summary_test_out)\n\n print 'Prediction Complete. Moving Forward..'\n\n # test answers\n self.test_review = self.X_tst_fwd\n self.predicted_test_summary = self.predicted_test_summary\n self.true_summary = self.Y_tst",
"def predict(model, img):\n\tx = image.img_to_array(img)\n\tx = np.expand_dims(x, axis=0)\n\tx = preprocess_input(x)\n\tpreds = model.predict(x)\n\treturn preds[0]"
] |
[
"0.7239248",
"0.7074474",
"0.69302636",
"0.68597704",
"0.6841375",
"0.6764959",
"0.6649414",
"0.6614922",
"0.6554689",
"0.6516499",
"0.64637536",
"0.64612246",
"0.6373398",
"0.63642263",
"0.63606596",
"0.63237506",
"0.6289094",
"0.62845325",
"0.6273538",
"0.62653995",
"0.6237582",
"0.618222",
"0.61600596",
"0.61600596",
"0.6155271",
"0.6144221",
"0.6119147",
"0.6114651",
"0.61134136",
"0.61037475"
] |
0.8082307
|
0
|
Unstack batch dimension and split into channels and alpha mask.
|
def unstack_and_split(self, x, batch_size, num_channels=3):
unstacked = torch.reshape(x, [batch_size, -1] + list(x.shape)[1:])
channels, masks = torch.split(unstacked, [num_channels, 1], dim=2)
return channels, masks
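A short sketch of the resulting shapes, with an assumed slot count and image size (not from the source); the owning module instance is called module here purely for illustration:

batch_size, num_slots = 2, 5
x = torch.randn(batch_size * num_slots, 4, 64, 64)       # 3 colour channels + 1 alpha mask, stacked on the batch dim
channels, masks = module.unstack_and_split(x, batch_size)
# channels.shape == (2, 5, 3, 64, 64); masks.shape == (2, 5, 1, 64, 64)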
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _reshape_channels(x):\n assert x.dim() == 4\n batch_size, nc, h, w = x.size()\n x_t = x.view(batch_size, nc, -1).transpose(1, 2).contiguous()\n x_t = x_t.view(batch_size, h, w, nc)\n return x_t",
"def batch_collate_fn(batch):\n images = []\n masks = []\n \n for (image, trimap, mask) in batch:\n mask = mask.unsqueeze(0)\n trimap = trimap.unsqueeze(0)\n image = torch.cat([image, trimap], 0).unsqueeze(0)\n \n images.append(image)\n masks.append(mask)\n\n images = torch.cat(images, 0)\n masks = torch.cat(masks, 0)\n\n return (images, masks)",
"def test_unstack2():\n x = np.arange(1, 25).reshape((4, 2, 3)).astype(np.float32)\n axis = 2\n x_tensor = paddle.to_tensor(x)\n out_list = paddle.unstack(x_tensor, axis=axis)\n length = len(out_list)\n for i in range(length):\n ept = x[:, :, i]\n npt.assert_allclose(out_list[i].numpy(), ept)",
"def test_unstack1():\n x = np.arange(1, 25).reshape((4, 2, 3)).astype(np.float32)\n axis = -1\n x_tensor = paddle.to_tensor(x)\n out_list = paddle.unstack(x_tensor, axis=axis)\n length = len(out_list)\n for i in range(length):\n ept = x[:, :, i]\n npt.assert_allclose(out_list[i].numpy(), ept)",
"def test_unstack():\n x = np.arange(1, 13).reshape((3, 2, 2)).astype(np.int32)\n axis = 0\n x_tensor = paddle.to_tensor(x)\n out_list = paddle.unstack(x_tensor, axis=axis)\n length = len(out_list)\n for i in range(length):\n ept = x[i, :, :]\n npt.assert_allclose(out_list[i].numpy(), ept)",
"def flatten_image(x):\n *batch_shape, h, w, c = x.shape\n return x.reshape((*batch_shape, h * w * c))",
"def test_unstack3():\n x = np.arange(1, 49).reshape((4, 3, 2, 2)).astype(np.float64)\n axis = 1\n x_tensor = paddle.to_tensor(x)\n out_list = paddle.unstack(x_tensor, axis=axis)\n length = len(out_list)\n for i in range(length):\n ept = x[:, i, :, :]\n npt.assert_allclose(out_list[i].numpy(), ept)",
"def _reshape(self, data):\n batch_size, height, width, n_channels = data.shape\n if self._grid_height:\n grid_height = self._grid_height\n else:\n grid_height = int(math.floor(math.sqrt(batch_size)))\n\n grid_width = int(math.ceil(batch_size/grid_height))\n\n if n_channels == 1:\n data = np.tile(data, (1, 1, 1, 3))\n n_channels = 3\n\n if n_channels != 3:\n raise ValueError('Image batch must have either 1 or 3 channels, but '\n 'was {}'.format(n_channels))\n\n shape = (height * grid_height, width * grid_width, n_channels)\n buf = np.full(shape, 255, dtype=np.uint8)\n multiplier = 1 if data.dtype in (np.int32, np.int64) else 255\n\n for k in range(batch_size):\n i = k // grid_width\n j = k % grid_width\n arr = data[k]\n x, y = i * height, j * width\n buf[x:x + height, y:y + width, :] = np.clip(\n multiplier * arr, 0, 255).astype(np.uint8)\n\n if self._zoom > 1:\n buf = buf.repeat(self._zoom, axis=0).repeat(self._zoom, axis=1)\n return buf",
"def detection_collate(batch):\n targets = []\n imgs = []\n masks = []\n num_crowds = []\n\n for sample in batch:\n imgs.append(sample[0])\n targets.append(torch.FloatTensor(sample[1][0]))\n masks.append(torch.FloatTensor(sample[1][1]))\n num_crowds.append(sample[1][2])\n\n return imgs, (targets, masks, num_crowds)",
"def make_grid(batch_img: torch.Tensor,\n batch_mask: torch.Tensor,\n img_denormalize_fn: Callable,\n mask_palette: Optional[Sequence] = default_palette,\n batch_gt_mask: Optional[torch.Tensor] = None):\n assert isinstance(batch_img, torch.Tensor) and isinstance(batch_mask, torch.Tensor)\n assert len(batch_img) == len(batch_mask)\n\n if batch_gt_mask is not None:\n assert isinstance(batch_gt_mask, torch.Tensor)\n assert len(batch_mask) == len(batch_gt_mask)\n\n b = batch_img.shape[0]\n h, w = batch_img.shape[2:]\n\n le = 3 if batch_gt_mask is None else 3 + 2\n out_image = np.zeros((h * le, w * b, 3), dtype='uint8')\n\n for i in range(b):\n img = batch_img[i]\n mask = batch_mask[i]\n\n img = img_denormalize_fn(img)\n img = tensor_to_numpy(img)\n img = render_image(img)\n mask = mask.cpu().numpy()\n mask = render_mask(mask, mask_palette)\n\n out_image[0:h, i * w:(i + 1) * w, :] = img\n out_image[1 * h:2 * h, i * w:(i + 1) * w, :] = render_datapoint(img,\n mask,\n blend_alpha=0.4)\n out_image[2 * h:3 * h, i * w:(i + 1) * w, :] = mask\n\n if batch_gt_mask is not None:\n gt_mask = batch_gt_mask[i]\n gt_mask = gt_mask.cpu().numpy()\n gt_mask = render_mask(gt_mask, mask_palette)\n out_image[3 * h:4 * h, i * w:(i + 1) * w, :] = render_datapoint(img,\n gt_mask,\n blend_alpha=0.4)\n out_image[4 * h:5 * h, i * w:(i + 1) * w, :] = gt_mask\n\n return out_image",
"def cutmix(batch: Tuple[torch.Tensor, torch.Tensor], alpha: float = 1.0) -> Tuple:\n data, targets = batch\n indices = torch.randperm(data.size(0))\n shuffled_data = data[indices]\n shuffled_targets = targets[indices]\n lam = np.random.beta(alpha, alpha) if alpha > 0 else 1\n\n x0, x1, y0, y1 = random_bbox(data, lam)\n\n data[:, :, y0:y1, x0:x1] = shuffled_data[:, :, y0:y1, x0:x1]\n\n targets = (targets, shuffled_targets, lam)\n\n return data, targets",
"def unstack_batch(tensor_dict):\n # # extract tensor from tuple. TODO: figure out where box tuple comes from?\n for key in tensor_dict.keys():\n if key == \"gt_boxes\":\n tensor_dict[\"gt_boxes\"] = tensor_dict[\"gt_boxes\"][0]\n unbatched_tensor_dict = {key: tf.unstack(tensor) for key, tensor in tensor_dict.items()}\n # remove padding along 'num_boxes' dimension of the gt tensors\n num_gt_list = unbatched_tensor_dict[\"num_gt_boxes\"]\n unbatched_unpadded_tensor_dict = {}\n for key in unbatched_tensor_dict:\n if key == \"num_gt_boxes\":\n continue\n unpadded_tensor_list = []\n for num_gt, padded_tensor in zip(num_gt_list, unbatched_tensor_dict[key]):\n tensor_shape = shape_utils.combined_static_and_dynamic_shape(padded_tensor)\n slice_begin = tf.zeros(len(tensor_shape), dtype=tf.int32)\n slice_size = tf.stack([num_gt] + [-1 if dim is None else dim for dim in tensor_shape[1:]])\n unpadded_tensor = tf.slice(padded_tensor, slice_begin, slice_size)\n unpadded_tensor_list.append(unpadded_tensor)\n unbatched_unpadded_tensor_dict[key] = unpadded_tensor_list\n return unbatched_unpadded_tensor_dict",
"def _batchify(data: nd.NDArray, batch_size):\n # Work out how cleanly we can divide the dataset into bsz parts.\n nbatch = len(data) // batch_size\n # Trim off any extra elements that wouldn't cleanly fit (remainders).\n data = data[0: nbatch * batch_size]\n # Evenly divide the data across the bsz batches.\n data = data.reshape(batch_size, -1).transpose()\n # if torch.cuda.is_available():\n # data = data.cuda()\n return data",
"def batch_to_space(stacked_patches, tiles, shape_padded_label, shape_image, channels, b_verbose=False):\n\n shape_stacked = tf.unstack(tf.shape(stacked_patches)) # something like [x, img_1, img_2, img_3, channel]\n stacked_patches = tf.reshape(stacked_patches, [tiles[0], tiles[1], tiles[2], *shape_stacked[1:]]) # split stacks into tiles\n stacked_patches = tf.transpose(stacked_patches, perm=[0, 3, 1, 4, 2, 5, 6]) # interleave tiles and img dims\n if b_verbose:\n stacked_patches = tf.Print(stacked_patches, [tf.shape(stacked_patches)], 'stacked_patches:', summarize=10)\n image = tf.reshape(stacked_patches, [-1, *shape_padded_label, channels]) # reshape into proper image\n if b_verbose:\n image = tf.Print(image, [tf.shape(image)], 'new_image:', summarize=10)\n\n # crop image to final size\n pos_begin = [int((a - b) / 2) for a, b in zip(shape_padded_label, shape_image)]\n image = tf.Print(image, [tf.shape(image), pos_begin, shape_image, channels], 'shapes before slicing', summarize=5)\n image = tf.slice(image,\n [0, *pos_begin, 0],\n [1, *shape_image, channels])\n\n return image",
"def forward(self, x, alpha=1e-8):\r\n batch_size, _, height, width = x.shape\r\n\r\n # [B x C x H x W] Subtract mean over batch.\r\n y = x - x.mean(dim=0, keepdim=True)\r\n\r\n # [1 x C x H x W] Calc standard deviation over batch\r\n y = torch.sqrt(y.pow(2.).mean(dim=0, keepdim=False) + alpha)\r\n\r\n # [1] Take average over feature_maps and pixels.\r\n y = y.mean().view(1, 1, 1, 1)\r\n\r\n # [B x 1 x H x W] Replicate over group and pixels.\r\n y = y.repeat(batch_size, 1, height, width)\r\n\r\n # [B x C x H x W] Append as new feature_map.\r\n y = torch.cat([x, y], 1)\r\n\r\n # return the computed values:\r\n return y",
"def _batchify(batch):\n im_name, im0, im1, im2, im3, im4, im5, im6, im7, im8, im9 = zip(*batch)\n im0 = nd.stack(*im0)\n im1 = nd.stack(*im1)\n im2 = nd.stack(*im2)\n im3 = nd.stack(*im3)\n im4 = nd.stack(*im4)\n im5 = nd.stack(*im5)\n im6 = nd.stack(*im6)\n im7 = nd.stack(*im7)\n im8 = nd.stack(*im8)\n im9 = nd.stack(*im9)\n return im_name, im0, im1, im2, im3, im4, im5, im6, im7, im8, im9",
"def collate_fn(self, batch):\n images = list()\n targets = list()\n\n for b in batch:\n images.append(b[0])\n targets.append(b[1])\n\n # images = torch.stack(images, dim=0)\n\n return images, targets # tensor (N, 3, 300, 300), 3 lists of N tensors each",
"def combined_masks(action_mask,betsize_mask):\n if action_mask.dim() > 2:\n return torch.cat([action_mask[:,:,:-2],betsize_mask],dim=-1)\n elif action_mask.dim() > 1:\n return torch.cat([action_mask[:,:-2],betsize_mask],dim=-1)\n else:\n return torch.cat([action_mask[:-2],betsize_mask])",
"def detection_collate(batch):\n targets = []\n imgs = []\n for sample in batch:\n imgs.append(sample[0])\n targets.append(torch.FloatTensor(sample[1]))\n return torch.stack(imgs, 0), targets",
"def split_and_concat_model():\n x = tf.keras.Input(shape=[224, 224, 3, ])\n # TODO: implement split for the following commented out method of splitting\n # y1 = x[:, :100, :, :]\n # y2 = x[:, 101:, :, :]\n y1, y2 = tf.split(x, [100, 124], 1)\n y1 = tf.nn.relu(y1)\n y2 = tf.keras.layers.BatchNormalization()(y2)\n z = tf.keras.layers.concatenate([y1, y2], axis=1)\n z = tf.keras.layers.Flatten()(z)\n output = tf.keras.layers.Dense(10, activation=tf.nn.softmax, name=\"split_and_concat_model\")(z)\n return output",
"def unbatch_stack(S, grid_shape):\n\tI, J = grid_shape\n\tC, M = S.shape[1], S.shape[2]\n\treturn S.reshape(-1, I, J, C, M, M)",
"def collate_fn(self, batch):\n images = list()\n boxes = list()\n labels = list()\n difficulties = list()\n\n for b in batch:\n images.append(b[0])\n boxes.append(b[1])\n labels.append(b[2])\n difficulties.append(b[3])\n\n images = torch.stack(images, dim=0)\n\n return images, boxes, labels, difficulties # tensor (N, 3, 300, 300), 3 lists of N tensors each",
"def split_3d_array_into_channels(arr):\n return [arr[:, :, i] for i in range(arr.shape[-1])]",
"def forward_single(self, x: Tensor, batch_img_metas: List[dict]):\n img_h, img_w = batch_img_metas[0]['pad_shape'][:2]\n batch_size, _, feat_h, feat_w = x.shape\n downsample_ratio = img_h / feat_h\n\n for conv_cls_prev_layer in self.conv_cls_prev:\n cls_feat = conv_cls_prev_layer(x)\n out_cls = self.conv_cls(cls_feat)\n\n if self.use_edge_fusion:\n # calculate the edge indices for the batch data\n edge_indices_list = get_edge_indices(\n batch_img_metas, downsample_ratio, device=x.device)\n edge_lens = [\n edge_indices.shape[0] for edge_indices in edge_indices_list\n ]\n max_edge_len = max(edge_lens)\n edge_indices = x.new_zeros((batch_size, max_edge_len, 2),\n dtype=torch.long)\n for i in range(batch_size):\n edge_indices[i, :edge_lens[i]] = edge_indices_list[i]\n # cls feature map edge fusion\n out_cls = self.edge_fuse_cls(cls_feat, out_cls, edge_indices,\n edge_lens, feat_h, feat_w)\n\n bbox_pred = []\n\n for i in range(len(self.group_reg_dims)):\n reg_feat = x.clone()\n # feature regression head\n if len(self.reg_branch[i]) > 0:\n for conv_reg_prev_layer in self.conv_reg_prevs[i]:\n reg_feat = conv_reg_prev_layer(reg_feat)\n\n for j, conv_reg in enumerate(self.conv_regs[i]):\n out_reg = conv_reg(reg_feat)\n # Use Edge Fusion Module\n if self.use_edge_fusion and (i, j) in self.edge_fusion_inds:\n # reg feature map edge fusion\n out_reg = getattr(self, 'edge_fuse_reg_{}_{}'.format(\n i, j))(reg_feat, out_reg, edge_indices, edge_lens,\n feat_h, feat_w)\n bbox_pred.append(out_reg)\n\n bbox_pred = torch.cat(bbox_pred, dim=1)\n cls_score = out_cls.sigmoid() # turn to 0-1\n cls_score = cls_score.clamp(min=1e-4, max=1 - 1e-4)\n\n return cls_score, bbox_pred",
"def tohost(x):\n n_device, n_batch, *remaining_dims = x.shape\n return x.reshape((n_device * n_batch,) + tuple(remaining_dims))",
"def adapt_batch(batch):\n image_arrays, labellings = batch\n\n current_batch_size = len(labellings)\n\n images = np.array(image_arrays).reshape(current_batch_size, *image_arrays[0].shape)\n\n padded_labellings = pad_labellings(labellings)\n\n labels = np.array(padded_labellings, dtype=np.int32).reshape(current_batch_size, -1)\n\n input_lengths = compute_input_lengths(image_arrays)\n\n label_lengths = np.array([len(labelling) for labelling in labellings],\n dtype=np.int32).reshape(current_batch_size, 1)\n\n return [images, labels, input_lengths, label_lengths], labels",
"def split_images(x, y=None, size=(128, 128), num_part=4):\n x_patches = image.PatchExtractor(patch_size=size, max_patches=num_part, random_state=0)\n x_imgs = x_patches.transform(x)\n # Check if number of channels is the same for grayscale\n if x.shape[-1] != x_imgs.shape[-1]:\n x_imgs = x_imgs[:, :, :, np.newaxis]\n\n if not y is None:\n y_patches = image.PatchExtractor(patch_size=size, max_patches=num_part, random_state=0)\n y_imgs = y_patches.transform(y)\n\n # Check if number of channels is the same for grayscale\n if y.shape[-1] != y_imgs.shape[-1]:\n y_imgs = y_imgs[:, :, :, np.newaxis]\n\n return x_imgs, y_imgs\n\n return x_imgs",
"def get_channels(batch):\n y = batch[..., :1]\n uv = batch[..., 1:3]\n edge = batch[..., 3:]\n\n return y, uv, edge",
"def unstack(a, axis=0):\n shape = a.shape\n return [jnp.squeeze(b, axis=axis) for b in \\\n jnp.split(a, shape[axis], axis=axis)]",
"def _to_stack(self, values):\n if self.batch_size > 1:\n try:\n values = np.stack(values, axis=0)\n except Exception as identifier:\n for x in values:\n print(x.shape)\n _, ax = plt.subplots(1)\n ax.imshow(x[..., 0])\n ax.set_title('ERROR!')\n plt.show()\n print(identifier)\n raise(Exception)\n else:\n values = values[0][np.newaxis, ...]\n return values"
] |
[
"0.59909207",
"0.58123934",
"0.5716505",
"0.5716326",
"0.5704696",
"0.5700815",
"0.55700964",
"0.5564052",
"0.55450964",
"0.55180305",
"0.551707",
"0.54962254",
"0.5416754",
"0.5411667",
"0.5395286",
"0.5373396",
"0.53642625",
"0.5344805",
"0.5337699",
"0.5336697",
"0.53321487",
"0.53207237",
"0.5320249",
"0.5304845",
"0.5279684",
"0.5278637",
"0.5278417",
"0.5277993",
"0.52759033",
"0.5266497"
] |
0.7611791
|
0
|
Animals that can speak are correctly identified
|
def test_animals_can_speak(self):
self.assertEqual(self.lion, 'roar')
self.assertEqual(self.cat, 'meow')
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def animals_by_species(self):\n print self.animal()",
"def animal_eats(self):\n self.update_fodder()\n self.herbivore_eats()\n self.carnivore_eats()",
"def substantiate():",
"def speak(self):\n # Speaks randomly to another agent on the same cell\n anticipated_meaning = None\n cellmates = self.model.grid.get_cell_list_contents([self.pos])\n\n # If other agents on the same cell\n if len(cellmates) > 1:\n hearer = self.random.choice(cellmates)\n\n while (hearer == self): # agents should not talk to themselves\n hearer = self.random.choice(cellmates)\n\n meaning = self.random.choice(self.model.schedule.agents).unique_id\n\n # If the speaker is not acquainted with the meaning\n if meaning not in self.meanings:\n print(\"New meaning added to speaker\")\n self.meanings.append(meaning)\n return Conversation(word=None, meaning=None, success=0.0)\n\n # If the hearer is not acquainted with the meaning\n if meaning not in hearer.meanings:\n print(\"New meaning added to hearer\")\n hearer.meanings.append(meaning)\n return Conversation(word=None, meaning=None, success=0.0)\n\n # 50% chance of having an anticipated meaning default\n if self.random.random() <= self.model.antecipated_prob:\n print(\" \" + str(self.unique_id) +\n \" points at \" + str(meaning))\n anticipated_meaning = meaning\n\n # If the speaker has a word for the meaning\n if meaning in self.meaning2word:\n word = self.meaning2word[meaning]\n\n # If the hearer has a word for the meaning\n if word in hearer.word2meaning:\n # If the hearer has no anticipated meaning\n if anticipated_meaning == None:\n return Conversation(word=word, meaning=meaning, success=1.0)\n # If anticipated meaning different from hearer meaning\n if (anticipated_meaning != None\n and anticipated_meaning != hearer.word2meaning[word]):\n hearer.delete_link(word)\n hearer.create_link(word, anticipated_meaning)\n return None\n # If anticipated meaning same as hearer meaning\n if (anticipated_meaning != None\n and anticipated_meaning == hearer.word2meaning[word]):\n return Conversation(word=word, meaning=meaning, success=1.0)\n\n # If the hearer has no word for the meaning\n else:\n # If anticipated meaning same as speaker meaning\n if (anticipated_meaning != None\n and word not in hearer.word2meaning\n and anticipated_meaning not in hearer.meaning2word):\n hearer.create_link(word, anticipated_meaning)\n return Conversation(word=word, meaning=meaning, success=0.0)\n\n # If the speaker has no word for the meaning\n if meaning not in self.meaning2word:\n return Conversation(word=None, meaning=meaning, success=0.0)",
"def add_animal(self, animal):\n try:\n if animal.saltwater:\n super().add_animal(animal)\n except AttributeError:\n raise AttributeError(\"Animal Is Incompatible With Biome\")",
"def on_object(self, image, objects):\n for obj in objects:\n if self.is_object_recognition_appropriate(obj.name):\n self.say(\"I see a {}\".format(obj.name))",
"def _sense_and_act(self):\n pass",
"def recognize():\n return 0",
"def animals(self):\n return self.herbivores + self.carnivores",
"def all_animals_eat(self):\n for cell in itertools.chain.from_iterable(self.map):\n if type(cell).__name__ in self.allowed_cells:\n cell.gen_fodder()\n cell.eat_herbivore()\n cell.eat_carnivore()",
"def animals_gives_birth(self):\n for species, animals in self.new_fauna_list.items():\n for i in range(math.floor(len(self.new_fauna_list[species])/2)):\n animal = animals[i]\n if animal.probability_of_birth(len(animals)):\n offspring_species = animal.__class__\n offspring = offspring_species()\n animal.update_weight_after_birth(offspring)\n if animal.gives_birth:\n self.fauna_list[species].append(offspring)\n animal.gives_birth = False",
"def __init__(self):\n IContainsAnimals.__init__(self, 15)\n IContainsPlants.__init__(self, 3)\n Identifiable.__init__(self)\n Biome.__init__(self, \"Coastline\")",
"def known_organisms():\n return [\"rat\"]",
"def _get_animal_from_message(self, message):\n animal = None\n\n # Try to find an animal from our inventory in the message\n find_animal_regex = r'({animals})'.format(animals='|'.join(self.animals))\n ret = re.findall(find_animal_regex, message)\n\n # re.findall return is a list of matching strings in the message\n # Is an empty list if no match found\n if ret:\n animal = random.choice(ret)\n\n return animal",
"def is_actor():\n return False",
"def animal_dies(self):\n for species, animals in self.fauna_list.items():\n for animal in animals:\n if animal.probability_of_death:\n self.remove_animal(animal)",
"def test_check_ambigous(self):\r\n\r\n flow0 = Flowgram(\"\")\r\n flow1 = Flowgram(\r\n \"0 1.2 2.1 3.4 0.02 0.01 1.02 0.08 0.5 1.0 4.1 0.0 0.0 1.23 0.0 3.1\")\r\n flow2 = Flowgram(\r\n \"0 1.2 2.1 3.4 0.02 0.01 1.02 0.08 0.5 1.0 4.1 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.23 0.0 3.1\")\r\n flow3 = Flowgram(\r\n \"0 1.2 2.1 3.4 0.02 0.0 0.0 0.01 1.02 0.08 0.5 1.0 4.1 0.0 0.0 0.0 0.0 1.23 0.0 3.1\")\r\n\r\n self.assertEqual(check_ambigous(flow0, 4), False)\r\n self.assertEqual(check_ambigous(flow1, 4), False)\r\n self.assertEqual(check_ambigous(flow2, 4), True)\r\n self.assertEqual(check_ambigous(flow2, 7), True)\r\n self.assertEqual(check_ambigous(flow2, 8), False)\r\n self.assertEqual(check_ambigous(flow3, 3), True)\r\n self.assertEqual(check_ambigous(flow3, 4), False)",
"def test_interaction_accepts_name():\n demag = ThinFilmDemag()\n assert hasattr(demag, 'name')",
"async def fox(self, interaction: Interaction):\n await post_random_animal_command(interaction)",
"def add(self, animal):\n x, y = animal.coords\n if animal.species == 'Elephant' and self.__nb_elephants < 5 and (x == 0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:\n self[x][y] = animal\n self.__nb_elephants += 1\n self.playerTurn = \"Rhinoceros\"\n\n elif animal.species == 'Rhinoceros' and self.__nb_rhinoceros < 5 and (x == 0 or x == 4 or y == 0 or y == 4) and self[x][y] == 0:\n self[x][y] = animal\n self.__nb_rhinoceros += 1\n self.playerTurn = \"Elephant\"\n else:\n return False",
"def think(s):",
"def ate_poison(self):\r\n for food in self.get_dna():\r\n i = food.split(\",\")\r\n if \"poison\" in food:\r\n print(self.get_species(), \"from generation\", self.get_generation(),\r\n \"died from poison after eating\", i[0])\r\n return (True)",
"def sense_and_act(self):\n pass",
"def classify(self, audio_sample, should_print=True):\n features_left, features_right = self.extract_features(audio_sample)\n classification_counts = [0 for x in range(len(self.speakers))]\n\n for i in range(len(features_left)):\n feature = np.reshape(features_left[i, :], (1, -1))\n\n left_pred = int(self.left_model.predict(feature)[0])\n classification_counts[left_pred] += 1\n\n if self.both_channels:\n right_pred = int(self.right_model.predict(feature)[0])\n classification_counts[right_pred] += 1\n\n probabilities = np.array(classification_counts) / sum(classification_counts)\n pred = np.argmax(probabilities)\n\n if should_print:\n print(probabilities)\n\n if probabilities[pred] > self.certainty:\n print(\"Identified %s\" % self.speakers[pred])\n return self.speakers[pred]\n else:\n print(\"Unidentified Speaker\")\n return -1",
"def handDecision(handIn):",
"def random_data(self) -> (str, str):\n random_animal = random.choice(self.animals_list)\n synsets = wn.synsets(str(random_animal))\n definition = \"\"\n while True:\n if len(synsets) != 0:\n for synset in synsets:\n if synset.lexname() == 'noun.animal':\n definition = synset.definition()\n break\n else:\n random_animal = random.choice(self.animals_list)\n synsets = wn.synsets(str(random_animal))\n return random_animal, definition",
"def an(text):\n text = force_unicode(text)\n if not CONSONANT_SOUND.match(text) and VOWEL_SOUND.match(text):\n return 'an'\n return 'a'",
"def immobilized(self, owner):\n messages = []\n immobilized = False\n \n if self.checkOver(owner, messages):\n immobilized = False\n \n elif self.confused(random.randint(0, 1)):\n self.doDamage(owner, messages)\n immobilized = True\n \n return immobilized, messages",
"def test_article_can_speak(self):\n lion = Animal.objects.get(title=\"Akun KU\")\n self.assertEqual(lion.title, 'The lion says \"roar\"')",
"def test_basic_inter_format(self):\n buff_interactions = isambard.buff.find_inter_ampal(\n self.topo, self.ff.distance_cutoff)\n for _ in range(100):\n a, b = random.choice(buff_interactions)\n self.assertTrue(type(a) is isambard.ampal.Atom)\n self.assertTrue(type(b) is isambard.ampal.Atom)\n self.assertTrue(a != b)"
] |
[
"0.5804902",
"0.5754538",
"0.56921846",
"0.56177425",
"0.55703396",
"0.5568995",
"0.5478271",
"0.5457741",
"0.5398982",
"0.53391945",
"0.5301369",
"0.5275877",
"0.52202857",
"0.51787615",
"0.5157603",
"0.51495886",
"0.5125665",
"0.512152",
"0.5121497",
"0.5117309",
"0.50895387",
"0.50793",
"0.50743616",
"0.5070662",
"0.5070132",
"0.50671345",
"0.50386196",
"0.5024982",
"0.5017773",
"0.5013281"
] |
0.5837831
|
0
|
Given a urlsafe version of an Album key, get the actual key
|
def get_album_key_by_keystr(keystr):
    attr_err = 'Keystrings must be an instance of base string, received: %s' % keystr
kind_err = 'Expected urlsafe keystr for kind %s but received keystr for kind %s instead.'
if not keystr or not isinstance(keystr, basestring):
raise RuntimeError(attr_err)
key = ndb.Key(urlsafe=keystr)
if not key.kind() == PHOTOALBUM_KIND:
raise RuntimeError(kind_err % (PHOTOALBUM_KIND, key.kind()))
return key
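A hedged round-trip example; the slug is illustrative, and ndb / PHOTOALBUM_KIND are assumed to be in scope exactly as in the snippet:

album_key = ndb.Key(PHOTOALBUM_KIND, 'summer-2016')      # illustrative slug
same_key = get_album_key_by_keystr(album_key.urlsafe())
assert same_key == album_key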
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_album_key(slug):\n err = 'Series slug must be defined and of of type basestring'\n\n if not slug or not isinstance(slug, basestring):\n raise RuntimeError(err)\n\n return ndb.Key(PHOTOALBUM_KIND, slug)",
"def get_key_from_url(file_url):\t\n\tparts = urlparse(file_url)\n\tbucket_name = get_bucket_name_from_url(file_url)\n\tkey = parts.path.replace(\"/\" + bucket_name + \"/\", \"\")\n\treturn key",
"def as_key(key):\n return key.lstrip('/').rstrip('/')",
"def key_id(cls, url: str):\r\n ...",
"def create_key_from_url(raw_url):\n org_url = urllib2.urlparse.urlparse(raw_url)\n new_key = ''\n net_location = org_url.netloc\n netloc_list = net_location.split(\".\")\n netloc_list.reverse()\n for part in netloc_list:\n new_key += '%s.' % part\n new_key = new_key[:-1] # Removes trailing period\n new_key = new_key + org_url.path \n return new_key",
"def get_album_by_slug(slug):\n\n album_key = get_album_key(slug)\n album = album_key.get()\n return album",
"def _extract_spreadsheet_key_from_url(url):\r\n result = url\r\n\r\n if 'key=' in url:\r\n result = url.split('key=')[-1].split('#')[0].split('&')[0]\r\n\r\n return result",
"def key(key):\n return key",
"def _get_key_url(self, key):\n urls = self.get_URLS(key)\n\n if len(urls) == 1:\n return urls[0]\n else: # multiple\n # TODO: utilize cache to check which archives might already be\n # present in the cache.\n # Then if not present in the cache -- check which are present\n # locally and choose that one to use\n if self._last_url and self._last_url in urls:\n return self._last_url\n else:\n return urls[0] # just the first one",
"def resolve_key(obj, _):\n return obj.key.decode()",
"def _get_raw_key(self, key_id):",
"def parse_key(key_id):\n\tcomment = get_key_comment(key_id)[0]\n\tregex = re.compile(\".*?\\\\((.*?)\\\\)\")\n\tcomment_bits = re.findall(regex, comment)[0].split(' ')\n\tif comment_bits[0] == sha256(comment_bits[1]).hexdigest():\n\t\treturn comment_bits[1]",
"def parse_key(key_id):\n comment = get_key_comment(key_id)[0]\n regex = re.compile(\".*?\\\\((.*?)\\\\)\")\n comment_bits = re.findall(regex, comment)[0].split(' ')\n if comment_bits[0] == sha256(comment_bits[1]).hexdigest():\n return comment_bits[1]",
"def key_for_bucket(self, key):\n\n try:\n return int(key[0] // 16), int(key[1] // 16), int(key[2] // 16)\n except ValueError:\n return KeyError(\"Key %s isn't usable here!\" % repr(key))",
"def key_for_bucket(self, key):\n\n try:\n return int(key[0] // 16), int(key[1] // 16)\n except ValueError:\n return KeyError(\"Key %s isn't usable here!\" % repr(key))",
"def _get_key_name(self, name):\n base_path = force_text(self.location)\n final_path = urljoin(base_path + \"/\", name)\n name = os.path.normpath(final_path.lstrip('/'))\n\n if six.PY2:\n name = name.encode('utf-8')\n return name",
"def _getNDBKey(websafe_key_to_get):\n return ndb.Key(urlsafe=websafe_key_to_get)",
"def get_store_key(asset):\n return '.'.join([asset.name, asset.uid, asset.ext])",
"def _get_akey_afile(self, key):\n url = self._get_key_url(key)\n return self._parse_url(url)[:2] # skip size",
"def _resolve_apikey(url: str, apikey: Optional[str]) -> Tuple[str, str]:\n # Even though the async api doesn't support apikey query parameter,\n # for ease of use support providing it as query parameter in the url.\n # authorization is always done via Authorization header\n url, params = UrlManipulation.separate_query_params(url, (\"apikey\",))\n try:\n apikey = params[\"apikey\"][0]\n except KeyError:\n pass\n\n if apikey is None:\n raise ValueError(\"apikey not defined\")\n\n return url, apikey",
"def decode_key_from_mongo(fieldname):\r\n return urllib.unquote(fieldname)",
"def get_apiauth_object_by_key(key):\n return model.APIAuth.query.filter_by(key=key).first()",
"def parse_camera_name_from_object_key(object_key):\n first_parts = object_key.split(\"/\")\n return first_parts[1]",
"def get_key_id(self):",
"def get_api_key(api_key):\n api.get(api_key)",
"def fname(key):\n return key.rsplit(\"/\", 1)[-1]",
"def getKey(self, namespace, ns_key):\n namespace = self._fixNS(namespace)\n if namespace == BARE_NS:\n return ns_key\n\n ns_alias = self.namespaces.getAlias(namespace)\n\n # No alias is defined, so no key can exist\n if ns_alias is None:\n return None\n\n if ns_alias == NULL_NAMESPACE:\n tail = ns_key\n else:\n tail = '%s.%s' % (ns_alias, ns_key)\n\n return 'openid.' + tail",
"def get_safe_part(key):\r\n version = key[0]\r\n # This function should only be called on versioned keys.\r\n assert version\r\n\r\n # Find the md5 hash part.\r\n c_link_key = key[1]\r\n for key_element in c_link_key[1:]:\r\n if (isinstance(key_element, basestring)\r\n and key_element.startswith('md5:')):\r\n md5 = key_element[4:]\r\n break\r\n\r\n return key[0] + (md5, )",
"def strkey(item):\n return '%s:%s:%s' % (item['group_id'], item['artifact_id'], item['version'])",
"def sub_key(dirname):\n return SUB_PREFIX + dirname"
] |
[
"0.7271961",
"0.64886624",
"0.6453254",
"0.62893945",
"0.6282472",
"0.62622005",
"0.6248456",
"0.62307674",
"0.61798584",
"0.6131501",
"0.60555077",
"0.60446566",
"0.60090566",
"0.59870636",
"0.5964662",
"0.5961178",
"0.59544635",
"0.5880653",
"0.5861731",
"0.58558726",
"0.58508784",
"0.58307904",
"0.58287644",
"0.5804643",
"0.57614213",
"0.57265276",
"0.57227707",
"0.5697038",
"0.5687968",
"0.56814444"
] |
0.76117
|
0
|
Create a ndb.Key given an Album slug
|
def get_album_key(slug):
    err = 'Album slug must be defined and of type basestring'
if not slug or not isinstance(slug, basestring):
raise RuntimeError(err)
return ndb.Key(PHOTOALBUM_KIND, slug)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def build_key(cls, song_id):\n return ndb.Key(cls, song_id)",
"def get_album_key_by_keystr(keystr):\n attr_err = 'Keystrings must be an instance of base string, recieved: %s' % keystr\n kind_err = 'Expected urlsafe keystr for kind %s but received keystr for kind %s instead.'\n if not keystr or not isinstance(keystr, basestring):\n raise RuntimeError(attr_err)\n\n key = ndb.Key(urlsafe=keystr)\n if not key.kind() == PHOTOALBUM_KIND:\n raise RuntimeError(kind_err % (PHOTOALBUM_KIND, key.kind()))\n\n return key",
"def get_album_by_slug(slug):\n\n album_key = get_album_key(slug)\n album = album_key.get()\n return album",
"def make_album(artist_name, album_title): \n music_album = {\n 'Artist': artist_name.title(),\n 'Album': album_title.title()\n }\n return music_album",
"def make_album(artist, title):\n album_dict = {\n 'artist': artist.title(),\n 'title': title.title(),\n }\n return album_dict",
"def genre_key(genre_name=DEFAULT_GENRE):\n return ndb.Key('Genre', genre_name.lower())",
"def blog_key(blog_name=DEFAULT_BLOG_NAME):\n return ndb.Key('Blog', blog_name)",
"def make_album(artist_name, album_title, track_number=''):\n album = {'artist': artist_name,\n 'album title': album_title,\n }\n if track_number:\n album['track number'] = track_number\n return album",
"def key_id(cls, url: str):\r\n ...",
"def cmd_album_create(client, args):\n fields = data_fields(args, client.allowed_album_fields)\n album = client.create_album(fields)\n generate_output({'album': album})",
"def gallery_key():\n return ndb.Key('Gallery', 'All')",
"def create_key(cls, topic, entry_id):\n\t\treturn db.Key.from_path(\n\t\t\t\tFeedRecord.kind(),\n\t\t\t\tFeedRecord.create_key_name(topic),\n\t\t\t\tcls.kind(),\n\t\t\t\tget_hash_key_name(entry_id))",
"def post_key(post_name=DEFAULT_POST_NAME):\n return ndb.Key('Post', post_name)",
"def make_album(artist_name, album_title, tracks=0):\n album = {'artist': artist_name.title(), 'album': album_title.title(),}\n if tracks:\n album['tracks'] = tracks\n return album",
"def create_key(cls, topic):\n\t\treturn datastore_types.Key.from_path(cls.kind(), utils.get_hash_key_name(topic))",
"def _make_key(self, extra_prefix, key):\n if extra_prefix:\n return \"-\".join((self.prefix, extra_prefix, key))\n else:\n return \"-\".join((self.prefix, key))",
"def _make_key(self, extra_prefix, key):\n if extra_prefix:\n return \"-\".join((self.prefix, extra_prefix, key))\n else:\n return \"-\".join((self.prefix, key))",
"def build_key(model, id):\n return \"{}.{}\".format(model.__name__, id)",
"def get_or_create(cls, key, urlsafe=False, **kwargs):\n if urlsafe:\n key = ndb.Key(urlsafe=key)\n ent = key.get()\n if ent is not None:\n return (ent, False) # False meaning \"not created\"\n ent = cls(**kwargs)\n ent.key = key\n ent.put()\n return (ent, True) # True meaning \"created\"",
"def keyify(content_type_pk, pk):\n return '%s:%s' % (content_type_pk, pk)",
"def make_album(artist_name, album_title, num_songs=None):\n album = {\n 'name': artist_name,\n 'album': album_title,\n 'num_songs': num_songs,\n }\n\n # This will not work!\n# album['name'] = artist_name\n# album['album'] = album_title\n# album['num_songs'] = num_songs\n\n return album",
"def set_album(audio: EasyID3, album):\r\n audio['album'] = album\r\n audio.save()",
"def make_album1(artist_name, album_title, track_number=''):\n album = {'artist': artist_name,\n 'album title': album_title,\n }\n if track_number:\n album['track number'] = track_number\n return album",
"def add_contributor_album(slug, username):\n contrib = Contributor.get(username)\n album = Album.get(slug)\n ContributorAlbum(slug=album.slug, username=contrib.username).save()",
"def _makeInternalIdentifier(self, prefix, key):\n\n return '_:' + hashlib.sha1(\n ('fb'+prefix+'key'+key).encode('utf-8')).hexdigest()[1:20]",
"def create_key ():",
"def make_album(artist_name, album_title, no_of_songs=None):\n if no_of_songs:\n album = {'artist': artist_name, 'title': album_title, 'songs': no_of_songs}\n else:\n album = {'artist': artist_name, 'title': album_title}\n \n return album",
"def make_album(name,album_name,song_num=''):\r\n\tmusic_album={'name':name.title(),'album_name':album_name}\r\n\tif song_num:\r\n\t\tmusic_album['song_num']=song_num\r\n\treturn(music_album)",
"def create(self, identity, record=None, data=None, **kwargs):\n self.set_slug(record, data[\"slug\"])",
"def test_get_album_id_regular_album(self):\n album_id = self.add_album(artist='Artist', album='Album')\n self.assertNotEqual(album_id, 0)\n track = Track(artist='Artist', album='Album', title='Title')\n track_album_id = self.app.set_album_id(track)\n self.assertEqual(track_album_id, album_id)\n self.assertEqual(track.album_id, album_id)"
] |
[
"0.6371879",
"0.6166366",
"0.5936699",
"0.58009845",
"0.5732777",
"0.55733705",
"0.55266577",
"0.5525588",
"0.5469047",
"0.53936213",
"0.538625",
"0.53846484",
"0.5360333",
"0.5352547",
"0.534155",
"0.5337662",
"0.5337662",
"0.53374225",
"0.5337322",
"0.53343934",
"0.533107",
"0.5324481",
"0.5324028",
"0.5318831",
"0.5313168",
"0.5292753",
"0.5286904",
"0.5282209",
"0.52726066",
"0.52645755"
] |
0.797927
|
0
|
Given an album slug, fetch the album entity
|
def get_album_by_slug(slug):
album_key = get_album_key(slug)
album = album_key.get()
return album
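Usage sketch with an illustrative slug; key.get() returns None when no PhotoAlbum entity exists for it:

album = get_album_by_slug('summer-2016')  # None if the album has not been created yet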
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_album(album_id):\n return query_single(album_id, Album, album_schema)",
"def album(self, q, page=None):\r\n return self.get('album', q, page)",
"def album(self, uri, detail=None):\r\n extras = self.ALBUM_DETAIL.get(detail)\r\n return self.get(uri, extras)",
"def get_album(self):\n return self._album",
"def _get_album_or_image(json, imgur):\n if json['is_album']:\n return Gallery_album(json, imgur, has_fetched=False)\n return Gallery_image(json, imgur)",
"def get_album_key(slug):\n err = 'Series slug must be defined and of of type basestring'\n\n if not slug or not isinstance(slug, basestring):\n raise RuntimeError(err)\n\n return ndb.Key(PHOTOALBUM_KIND, slug)",
"def get_album(self, object_id, relation=None, **kwargs):\n return self.get_object(\"album\", object_id, relation=relation, **kwargs)",
"def get_album(self) -> Optional[str]:\n return self.album",
"def album(self, album_id, **kwargs):\n _id = self._get_album_id(album_id)\n # pylint: disable=no-member\n return self._get(API.ALBUM.value.format(id=_id), **kwargs)",
"def get_album_by_id(self, album_id):\n self.app.curs.execute('select * from album where alid=%s', (album_id,))\n if self.app.curs.rowcount == 1:\n return self.app.curs.fetchone()\n else: # pragma: no cover\n return None",
"def get_albums(entity_url: str) -> list:\n entity_url = entity_url.rstrip(\"/\")\n response = requests.get(entity_url)\n soup = BeautifulSoup(response.text, \"html.parser\")\n albums = []\n for link in soup.find_all('a'):\n url = link.get('href')\n if url is not None and \"/album/\" in url:\n if url.startswith(\"http\"):\n albums.append(url)\n else:\n albums.append(f\"{entity_url}{url}\")\n return albums",
"async def get_album(self, album_id: int) -> APIReturn:\n return await self._request(\"GET\", \"/getAlbum\", extra_query={\"id\": album_id})",
"def get_random_album(self):\n lib = self.ctrl.library\n artist, album = lib.get_random_album()\n return self.resp_from_data({\n \"artist\": artist,\n \"album\": album,\n \"path\": lib.get_path(artist, album)\n })",
"def search_for_album(album_name):\n\n print(f'Searching for album: {album_name}')\n\n search_result = spotifyObject.search(q=f'\"{album_name}\"', limit=20, type='album')\n\n items = search_result['albums']['items']\n\n results = []\n\n for item in items:\n if len(item['artists']) > 1:\n artists = tuple(art['name'] for art in item['artists'])\n else:\n artists = item['artists'][0]['name']\n\n results.append((artists, item['name'], item['id']))\n\n return results",
"def get_album_list():\n\n # TODO: Paginate this, etc\n entities = PhotoAlbum.query().order(-PhotoAlbum.title).fetch(1000)\n\n return entities",
"def get_albums_by_artist(self, artist_id):\n return self.__get('album', artist_id)",
"def album(self):\n return self.getItunesAttribute('Album')",
"def get_album(self, id):\n url = \"https://api.imgur.com/3/album/{0}\".format(id)\n json = self._send_request(url)\n return Album(json, self)",
"def get_album_cover(self):\n artist = self.get_request_arg(\"artist\")\n album = self.get_request_arg(\"album\")\n if not (album and artist):\n return self.resp_from_data(\n {\"message\": \"Please specify a valid artist and album\"}, 403)\n else:\n cover = self.ctrl.library.get_cover_path(artist, album)\n return self.resp_from_image(cover)",
"def album(self, album_id):\n if not isinstance(album_id, int):\n return \"the id should be an integer\"\n x = requests.get(\n f\"{Endpoints.base_url}album.get?apikey={self.api_key}&album_id={album_id}\"\n )\n if x.json()[\"message\"][\"header\"][\"status_code\"] == 401:\n return \"Invalid API key\"\n if x.json()[\"message\"][\"header\"][\"status_code\"] == 402:\n return (\n \"The usage limit has been reached, either you exceeded per day requests limits or your balance is \"\n \"insufficient. \"\n )\n if x.json()[\"message\"][\"header\"][\"status_code\"] == 403:\n return \"You are not authorized to perform this operation.\"\n if x.json()[\"message\"][\"header\"][\"status_code\"] == 404:\n return f\"No album with given ID:{album_id} found\"\n return x.json()",
"def get_albums(self, offset=None):\n return self.__get('albums')",
"async def search_album(album_name):\n async with aiohttp.ClientSession() as session:\n async with session.get('https://bandcamp.com/api/fuzzysearch/1/autocomplete?q=' + album_name) as resp:\n response = await resp.json()\n\n results = response.get('auto', {}).get('results', [])\n results = [res for res in results if res.get('type') == 'a']\n if not results:\n raise NotFound\n result = results[0]\n async with session.get(result.get('url', 'https://bandcamp.com/')) as resp:\n response = await resp.text()\n try:\n result['release_date'] = response.split('album_release_date: \"')[-1].split('\",')[0].split(':')[0]\n except:\n result['release_date'] = '01 Jan 1970 00'\n result['track_list'] = [getattr(aa.find('span'), 'text', '') for aa in bs4.BeautifulSoup(response, 'html.parser').find('table', {'class':'track_list'}).find_all('tr')]\n\n return BandcampAlbum(result)",
"def get_albums():\n return query_multiple(request.args, album_search, \\\n album_filter, Album, albums_schema)",
"def find_by_name(our_data,name):\n for album in our_data:\n if album['album'] == name:\n return album\n return None",
"def get_album_url(self) -> Optional[str]:\n return self.album_url",
"async def search_song(album_name):\n async with aiohttp.ClientSession() as session:\n async with session.get('https://bandcamp.com/api/fuzzysearch/1/autocomplete?q=' + album_name) as resp:\n response = await resp.json()\n\n results = response.get('auto', {}).get('results', [])\n results = [res for res in results if res.get('type') == 't']\n if not results:\n raise NotFound\n result = results[0]\n async with session.get(result.get('url', 'https://bandcamp.com/')) as resp:\n response = await resp.text()\n try:\n result['release_date'] = response.split('album_release_date: \"')[-1].split('\",')[0].split(':')[0]\n except:\n result['release_date'] = '01 Jan 1970 00'\n result['TrackAlbum'] = bs4.BeautifulSoup(response, 'html.parser').find('span', itemprop='inAlbum').text.strip()\n\n return BandcampSong(result)",
"def get_album(self, album_id, only_active=True):\n options = {\n 'album_id': int(album_id),\n 'only_active': int(bool(only_active)),\n }\n return self._get('get_album', options)",
"def fetch(cls, slug):\n try:\n article = Article.objects.get(slug=slug)\n except Article.DoesNotExist:\n raise exceptions.NotFound(f'Article of slug {slug} nonexistent')\n else:\n return article",
"def fetch(cls, slug):\n try:\n article = Article.objects.get(slug=slug)\n except Article.DoesNotExist:\n raise exceptions.NotFound(f'Article with slug {slug} nonexistent')\n else:\n return article",
"def get_albums(self):\n artist = self.get_request_arg(\"artist\")\n if artist:\n lib = self.ctrl.library\n lst = sorted(self.ctrl.library.get_albums(artist))\n albums = [{\"artist\": artist,\n \"album\": album,\n \"path\": lib.get_path(artist, album)} for album in lst]\n if lst:\n return self.resp_from_data(albums)\n return self.resp_from_data(\n {\"message\": f\"No album found for artist={artist}\"}, 400)"
] |
[
"0.75652915",
"0.70452404",
"0.67959744",
"0.65954405",
"0.65209013",
"0.64783317",
"0.6462776",
"0.6453332",
"0.643563",
"0.64184994",
"0.62658083",
"0.6256562",
"0.6231396",
"0.6121484",
"0.6022898",
"0.60085446",
"0.59636325",
"0.5959914",
"0.59453434",
"0.592426",
"0.5915406",
"0.58989745",
"0.5883402",
"0.58545595",
"0.57640034",
"0.5754611",
"0.57439524",
"0.57378596",
"0.5736686",
"0.5736287"
] |
0.80606824
|
0
|
Fetch a list of Albums
|
def get_album_list():
# TODO: Paginate this, etc
entities = PhotoAlbum.query().order(-PhotoAlbum.title).fetch(1000)
return entities
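A minimal pagination sketch for the TODO above, assuming the same App Engine ndb PhotoAlbum model is importable; ndb's fetch_page returns a page of entities plus a cursor to resume from on the next call:

def get_album_page(page_size=20, cursor=None):
    # Same ordering as get_album_list above, but fetched one page at a time.
    query = PhotoAlbum.query().order(-PhotoAlbum.title)
    # fetch_page returns (results, next_cursor, more_available)
    entities, next_cursor, more = query.fetch_page(page_size, start_cursor=cursor)
    return entities, next_cursor, more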
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_albums():\n return query_multiple(request.args, album_search, \\\n album_filter, Album, albums_schema)",
"def get_albums(self):\n artist = self.get_request_arg(\"artist\")\n if artist:\n lib = self.ctrl.library\n lst = sorted(self.ctrl.library.get_albums(artist))\n albums = [{\"artist\": artist,\n \"album\": album,\n \"path\": lib.get_path(artist, album)} for album in lst]\n if lst:\n return self.resp_from_data(albums)\n return self.resp_from_data(\n {\"message\": f\"No album found for artist={artist}\"}, 400)",
"def albums(self, albums, **kwargs):\n album_list = map(self._get_album_id, albums)\n return self._get(API.ALBUMS.value, ids=\",\".join(album_list), **kwargs)",
"def get_albums(entity_url: str) -> list:\n entity_url = entity_url.rstrip(\"/\")\n response = requests.get(entity_url)\n soup = BeautifulSoup(response.text, \"html.parser\")\n albums = []\n for link in soup.find_all('a'):\n url = link.get('href')\n if url is not None and \"/album/\" in url:\n if url.startswith(\"http\"):\n albums.append(url)\n else:\n albums.append(f\"{entity_url}{url}\")\n return albums",
"def GetAlbums(self, start=0, end=0, sortmethod='label', sortorder='ascending', filter='', artistid=None):\n self.logger.debug(\"Loading all albums for ARTISTID \" + str(artistid))\n try:\n xbmc = Server(self.url('/jsonrpc', True))\n sort = {'order': sortorder, 'method': sortmethod, 'ignorearticle': True}\n properties=['artist', 'title', 'year', 'description', 'thumbnail']\n limits = {'start': int(start), 'end': int(end)}\n if artistid is not None:\n filter = {'artistid': int(artistid)}\n else:\n filter = {'or': [\n {'field': 'album', 'operator': 'contains', 'value': filter},\n {'field': 'artist', 'operator': 'contains', 'value': filter}\n ]}\n return xbmc.AudioLibrary.GetAlbums(properties=properties, limits=limits, sort=sort, filter=filter)\n except ValueError:\n return",
"def get_albums(self):\n self.artist = self.artists_list.currentText()\n self.c_albums = [x['album'] for x in dmlc.list_albums(self.artist)\n if [x['album'] in self.albums_map[self.artist]]]\n self.albums_list.clear()\n self.albums_list.addItems(self.c_albums)\n self.update_navigation_buttons()",
"def simple_album_list():\r\n album_list = []\r\n data = dbase()\r\n for album in data.keys():\r\n album_list += [album]\r\n return album_list",
"def get_albums_alpha(session_):\n artists = session_.query(Album).order_by(Album.title.asc()).all()\n return artists",
"def get_albums(self, limit=None):\n url = (\"https://api.imgur.com/3/account/{0}/albums/{1}\".format(self.name,\n '{}'))\n resp = self._imgur._send_request(url, limit=limit)\n return [Album(alb, self._imgur, False) for alb in resp]",
"def get_albums(self, offset=None):\n return self.__get('albums')",
"def get_albums(username):\n cur = mysql.connection.cursor()\n cur.execute(\"SELECT * FROM album WHERE username = '{0}'\".format(username))\n return cur.fetchall()",
"def fetchAlbumIds(artist_id):\n url = 'https://api.spotify.com/v1/artists/' + artist_id + '/albums?market=US&album_type=album'\n req = requests.get(url)\n\n data = req.json()\n\n #checking for bad return value\n if not req.ok:\n print \"error : \" + data['error']['message']\n return \"error : \" + data['error']['message']\n\n albums = []\n for item in data['items']:\n \talbums.append(item['id'])\n\n return albums",
"def album_list(self):\n\n artist_id = self.addon_args[\"artist_id\"][0]\n\n xbmcplugin.setContent(self.addon_handle, \"albums\")\n\n for album in self.connection.walk_artist(artist_id):\n self.add_album(album)\n\n xbmcplugin.addSortMethod(\n self.addon_handle, xbmcplugin.SORT_METHOD_UNSORTED)\n xbmcplugin.addSortMethod(\n self.addon_handle, xbmcplugin.SORT_METHOD_ALBUM)\n xbmcplugin.addSortMethod(\n self.addon_handle, xbmcplugin.SORT_METHOD_ARTIST)\n xbmcplugin.addSortMethod(\n self.addon_handle, xbmcplugin.SORT_METHOD_VIDEO_YEAR)\n\n xbmcplugin.endOfDirectory(self.addon_handle)",
"def getAlbums():\n\n r = requests.get(ALBUMS_URL, headers=HEADER, timeout=5)\n\n if r.status_code == 200:\n \n try:\n albums = [] \n soup = BeautifulSoup(r.text, \"html.parser\")\n album = soup.find_all(\"div\", class_=\"duv\")\n for i,al in enumerate(album): \n temp = {}\n temp['link'] = al.find_all(\"a\")[0]['href']\n temp['album'] = al.find_all(\"span\", class_=\"title\")[0].text\n albums.append(temp)\n\n if len(albums) > 0:\n return albums\n else:\n print(\"No albums found on site2!\")\n sys.exit(0)\n \n except Exception as e:\n print(\"Failed to get albums from site2\\n\", e)\n sys.exit(0)\n\n else:\n print(\"Albums Url fetch failed! Status code: {}\".format(r.status_code))\n sys.exit(0)",
"def getAlbums(owner_id=None, album_ids=None, offset=None, count=None, need_system=None,\\\n need_covers=None, photo_sizes=None):\n params = {\n 'owner_id': owner_id,\n 'album_ids': album_ids,\n 'offset': offset,\n 'count': count,\n 'need_system': need_system,\n 'need_covers': need_covers,\n 'photo_sizes': photo_sizes\n }\n result = call('photos.getAlbums', **params)\n return parse_response(result)",
"def search_for_album(album_name):\n\n print(f'Searching for album: {album_name}')\n\n search_result = spotifyObject.search(q=f'\"{album_name}\"', limit=20, type='album')\n\n items = search_result['albums']['items']\n\n results = []\n\n for item in items:\n if len(item['artists']) > 1:\n artists = tuple(art['name'] for art in item['artists'])\n else:\n artists = item['artists'][0]['name']\n\n results.append((artists, item['name'], item['id']))\n\n return results",
"def get_albums(self):\n return AlbumView.get_by_artist(self.name)",
"def get_albums_by_artist(albumtype, search_for, sort_on):\n return list(dmla.list_albums_by_artist(albumtype, search_for, sort_on))",
"def cmd_account_albums(client, args):\n account_albums = client.get_account_albums(args.username, args.page)\n data = [item.__dict__ for item in account_albums]\n generate_output({'account_albums': data}, args.output_file)",
"def get_album(album_id):\n return query_single(album_id, Album, album_schema)",
"def read_artist_albums(id, name):\n list_a = [(x.name, str(x.release_year), str(x.id))\n for x in dmla.list_albums_by_artist('', id, 'Jaar')]\n list_c = [(x['album'], x['year']) for x in dmlc.list_albums(name)]\n return list_a, list_c",
"def get_by_artist(cls, artist):\n results = cls.query().filter(FileRecord.artist == artist).order_by(\n FileRecord.year).all()\n albums = []\n if results and len(results) > 0:\n for result in results:\n albums.append(cls(*result))\n\n return albums\n\n return albums",
"def get_top_albums(\n self, period: Period, limit: int = 50, page: int = 1\n ) -> ListModel[Album]:\n assert isinstance(period, Period)\n\n return self.retrieve(\n bind=Album,\n flatten=\"album\",\n params=dict(\n method=\"user.getTopAlbums\",\n user=self.name,\n limit=limit,\n page=page,\n period=period.value,\n ),\n )",
"def get(request, slug):\n calbums = [x.json for x in get_album_contributors(get_album_by_slug(slug))]\n \n response = render_to_response(\n \"data/list.json\",\n {\"data\": calbums},\n content_type=\"application/json\",\n )\n response['Cache-Control'] = 'no-cache'\n return response",
"def albums(date, album_type, genre):\n urlhandle = f\"{amazon_charts_url}/albums\"\n params = {\n \"type\": album_type,\n \"date\": date,\n \"genre\": genre,\n }\n\n data = utilities.RequestData(urlhandle, params)\n return utilities.RequestGet(data)[\"data\"]",
"def simple_songs_list(name_of_album):\r\n songs = []\r\n data1 = dbase()\r\n data1 = data1[name_of_album][0]\r\n for song in data1.keys():\r\n songs += [song]\r\n return songs",
"def get_tracks_from_albums(sp, album_uri_list):\n\n track_list = [[\"track_name\", \"track_uri\", \"track_release_date\"]]\n\n print(\"Log: Pulling data from Spotify. This can take a while...\")\n\n for album_uri in album_uri_list:\n album_tracks = sp.album_tracks(album_uri, limit=50, offset=0)[\"items\"]\n count_tracks_in_album = len(album_tracks)\n album_release_date = sp.album(album_uri)[\"release_date\"]\n\n # This part is probably very slow and should be improved by accessing the API less often\n for track_number in range(count_tracks_in_album):\n track_name = album_tracks[track_number][\"name\"]\n track_uri = album_tracks[track_number][\"uri\"]\n \n track_list.append([track_name, track_uri, album_release_date])\n\n # Create df from list of tracks for all albums\n track_df = pd.DataFrame(data=track_list[1:], columns=track_list[0])\n \n print(\"Log: Finished pulling all tracks from albums.\")\n return track_df",
"def tracked_albums():\n print('Your Google Photos Albums ([X] = tracked):')\n albums = get_albums(service)\n for i, a in enumerate(albums):\n check = 'X' if a.id in library.get_album_ids() else ' '\n print('[{}] {}. {}'.format(check, i+1, a.title))\n return albums",
"def cmd_album_images(client, args):\n album_images = client.get_album_images(args.album_id)\n data = [item.__dict__ for item in album_images]\n generate_output({'album_images': data}, args.output_file)",
"def get_albums(playlist_name):\n\n playlist_id = find_playlist(playlist_name)\n \n items = get_playlist_tracks(playlist_id=playlist_id)\n \n track_values = []\n \n for item in items:\n track = item['track']\n album = track['album']\n artists = tuple(artist['name'] for artist in album['artists'])\n \n track_values.append((album['name'], artists[0]))\n \n album_details = namedtuple('AlbumDetails', 'album artist')\n \n for tup in dict.fromkeys(track_values):\n yield album_details(*tup)"
] |
[
"0.8165235",
"0.77157474",
"0.7463277",
"0.7329539",
"0.7285775",
"0.7235554",
"0.71896297",
"0.7168777",
"0.71157223",
"0.7096026",
"0.7090436",
"0.70471865",
"0.70387936",
"0.6880528",
"0.68339896",
"0.6679008",
"0.66074675",
"0.6587003",
"0.65148",
"0.6513741",
"0.64734447",
"0.64520377",
"0.6439934",
"0.64054096",
"0.6400348",
"0.63549346",
"0.6321143",
"0.6306885",
"0.62803304",
"0.62781984"
] |
0.81510186
|
1
|
Attempts a Redis DB connection and returns the DB Object
|
def dbConnect(self):
    r = redis.StrictRedis()
    try:
        r = redis.from_url(os.environ.get("REDIS_URL"))
        print("DB Connection seems okay!")
    except Exception as error:
        print("Oops! An exception has occurred:", error)
        print("Exception TYPE:", type(error))
        r = None
    finally:
        return r
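Note that redis.from_url only builds a client object; no connection is attempted until a command is issued, so the try/except above will not catch an unreachable server. A small standalone sketch (hypothetical helper, not part of the class above) that verifies connectivity with ping():

import os
import redis

def connect_and_verify(url=None):
    # Fall back to a local instance when REDIS_URL is not set.
    r = redis.from_url(url or os.environ.get("REDIS_URL", "redis://localhost:6379/0"))
    try:
        r.ping()  # forces a round trip; raises redis.ConnectionError on failure
        return r
    except redis.ConnectionError as error:
        print("Redis connection failed:", error)
        return None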
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def connect_to_db(self):\n r = redis.Redis(host=self.hostname,\n port=self.portnumber,\n password=self.password)\n try:\n r.ping()\n except redis.ConnectionError:\n sys.exit('ConnectionError: is the redis-server running?')\n self.r = r",
"def _conn_redis(self) -> Redis:\n return Redis(host=self._REDIS_DB_HOST, port=self._REDIS_DB_PORT, db=0,decode_responses=True)",
"def _connect(self):\n try:\n rcon = redis.StrictRedis(self._host, self._port, self._db)\n # Return the connection only if is valid and reachable\n if not rcon.ping():\n return None\n except (redis.ConnectionError, redis.RedisError) as exc:\n LOG.error(\"Failed to connect to Redis Server: %s\", exc)\n return None\n\n return rcon",
"def connect_to_redis():\n return Redis(host=redis_host, port=redis_port, db=0)",
"def setup(self):\n\t\ttry:\n\t\t\tdatabase = redis.StrictRedis(host=self.HOST, port=self.PORT, db=self.DB)\n\n\t\t\tself.logger.info(\"Successfully established Redis connection.\")\n\n\t\t\treturn database\n\n\t\texcept redis.exceptions.ConnectionError as err:\n\t\t\traise err",
"def get_database(redis_host, redis_port, redis_pass):\n return redis.StrictRedis(host=redis_host, port=redis_port,\n password=redis_pass)",
"def connect_db():\n conexion = redis.StrictRedis(host='127.0.0.1', port= 6379, decode_responses=True, charset='utf-8')\n if (conexion.ping()):\n print (\"conectado al servidor de redis\")\n else:\n print(\"error...\")\n return conexion",
"def _get_conn(self):\n return redis.Redis(connection_pool=self.pool)",
"def conn_redis(host, port, db=0):\r\n r = redis.Redis(host=host, port=port, db=db)\r\n return r",
"def get_connection(self, params):\r\n return Redis(connection_pool=self.get_or_create_connection_pool(params))",
"def _connect(self):\n try: \n self.r = redis.StrictRedis(host=self.host, port=self.port, db=self.db)\n except:\n raise",
"def get_redis() -> redis.Redis:\n global redis_conn\n if not redis_conn:\n host = app.config.get(\"REDIS_HOST\", \"127.0.0.1\")\n port = app.config.get(\"REDIS_PORT\", \"6379\")\n db = app.config.get(\"REDIS_DB\", \"0\")\n redis_conn = redis.Redis(host=host, port=port, db=db)\n\n return redis_conn",
"def connection():\n global _connection\n if _connection is None:\n _connection = StrictRedis.from_url(REDIS_URL)\n return _connection",
"def db(self):\n if self._db is None:\n self._db = redis.StrictRedis.from_url(self._uri)\n self.strict_release.register(self._db)\n return self._db",
"def connect(self, **kwargs):\n\n self.__db = redis.Redis(**kwargs)\n try:\n self.__db.info()\n self.connected = True\n except redis.ConnectionError as e:\n self.logger.error(\"Failed to connect to Redis server: \", e)\n raise QueueNotConnectedError(e)\n\n return True",
"def _get_db(reconnect=False):\n global _db, _connection\n identity = get_identity()\n # Connect if not already connected\n if _connection.get(identity) is None or reconnect:\n _connection[identity] = _get_connection(reconnect=reconnect)\n\n if _db.get(identity) is None or reconnect:\n # _db_name will be None if the user hasn't called connect()\n if _db_name is None:\n raise ConnectionError('Not connected to the database')\n\n # Get DB from current connection and authenticate if necessary\n _db[identity] = _connection[identity][_db_name]\n if _db_username and _db_password:\n _db[identity].authenticate(_db_username, _db_password)\n\n return _db[identity]",
"def create_connection():\n # REDIS_URL is defined in .env and loaded into the environment by Honcho\n redis_url = os.getenv('REDIS_URL')\n # If it's not defined, use the Redis default\n if not redis_url:\n redis_url = 'redis://localhost:6379'\n urlparse.uses_netloc.append('redis')\n url = urlparse.urlparse(redis_url)\n return redis.StrictRedis(\n host=url.hostname,\n port=url.port,\n db=0,\n password=url.password\n )",
"def conn(self):\n if self._sentinel:\n return self._sentinel.master_for(self._sentinel_name)\n if not self._conn:\n self._conn = self.__redis_mod.StrictRedis(\n host=self._host, port=self._port, **self._conn_kwargs\n )\n return self._conn",
"def _connect(self):\n self.connection = RedisConnection(self.host, self.port, self.dbname)",
"def get_client(conn):\n # No database indicates a cluster connection\n if not conn.get('db', None):\n conn.pop('db', None)\n return connect_redis_cluster(conn)\n\n # Otherwise it's a regular redis connection\n return connect_redis(conn)",
"def get_redis_client(host='localhost', port=6379, db=0):\n host = os.environ.get('REDIS_HOST') or host\n port = os.environ.get('REDIS_PORT') or port\n return StrictRedis(host=host, port=port, db=db)",
"def connect_redis(conn):\n # Don't pass empty password to the client\n if not conn.get('password', None):\n conn.pop('password', None)\n\n return redis.StrictRedis(**conn)",
"def connect(self):\n if self.connection is not None:\n logger.info(\" connection: %s \" % (self.connection is not None))\n if not self.connection.opened():\n logger.info(\"connection is closed\")\n return self.reconect()\n\n if self.connection.opened():\n return self.connection\n try:\n self.connection = connect(**self.options)\n except Exception as e:\n logger.critical(\"Unable to connect to DB: {0}\".format(e.message))\n raise\n\n return self.connection",
"def connect_redis(uri):\n puri = urlparse.urlparse(uri)\n host = puri.hostname\n port = puri.port\n password = puri.password if puri.password else ''\n db_name = puri.path.split('/')[1]\n r = redis.Redis(host=host, port=port, password=password, db=db_name)\n assert r.ping()\n return r",
"def _get_connection(reconnect=False):\n global _connection\n identity = get_identity()\n # Connect to the database if not already connected\n if _connection.get(identity) is None or reconnect:\n try:\n _connection[identity] = Connection(**_connection_settings)\n except Exception, e:\n raise ConnectionError(\"Cannot connect to the database:\\n%s\" % e)\n return _connection[identity]",
"def test_passing_connection(self):\n Pet.init_db(Redis(host=REDIS_HOST, port=REDIS_PORT))\n self.assertIsNotNone(Pet.redis)",
"def get_db():\n if not hasattr(g, 'db_connection'):\n g.db_connection = connect_db()\n return g.db_connection",
"def create_redis_connection(app=None):\n\n if app:\n app.logger.info('Instantiated new redis connection.')\n\n redis_connection = redis.StrictRedis(\n host=\"localhost\",\n port=6379,\n db=0\n )\n\n if not redis_connection.exists('last_queue_idx'):\n redis_connection.set('last_queue_idx', 0)\n\n return redis_connection",
"def get_db(self):\n self.logger.info('in get_db()')\n try:\n return self.client[self.db_name]\n except Exception as e:\n self.logger.error(f'Error occurred while getting client {e}')",
"def _connect(self):\r\n if not self._db:\r\n import boto\r\n sdb = boto.connect_sdb()\r\n if not self.domain_name:\r\n self.domain_name = boto.config.get(\"DB\", \"sequence_db\", boto.config.get(\"DB\", \"db_name\", \"default\"))\r\n try:\r\n self._db = sdb.get_domain(self.domain_name)\r\n except SDBResponseError, e:\r\n if e.status == 400:\r\n self._db = sdb.create_domain(self.domain_name)\r\n else:\r\n raise\r\n return self._db"
] |
[
"0.77405405",
"0.77253574",
"0.7679741",
"0.7658954",
"0.7619239",
"0.74647945",
"0.7445185",
"0.74369836",
"0.74027646",
"0.73723024",
"0.73014516",
"0.723885",
"0.7207661",
"0.7056827",
"0.70409024",
"0.7014019",
"0.6989931",
"0.6985283",
"0.69585145",
"0.69442034",
"0.6850017",
"0.67983764",
"0.6788899",
"0.67850643",
"0.6781833",
"0.6743956",
"0.67383206",
"0.66456664",
"0.6631926",
"0.6625903"
] |
0.8348194
|
0
|
Converts short URL to an ID
|
def shortURLToId(self, shortURL):
    # Base-62 digits: 'a'-'z' -> 0-25, 'A'-'Z' -> 26-51, '0'-'9' -> 52-61.
    id = 0
    for i in shortURL:
        val_i = ord(i)
        if(val_i >= ord('a') and val_i <= ord('z')):
            id = id*62 + val_i - ord('a')
        elif(val_i >= ord('A') and val_i <= ord('Z')):
            id = id*62 + val_i - ord('A') + 26
        else:
            id = id*62 + val_i - ord('0') + 52
    return id
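For reference, the inverse mapping (ID back to a short URL) walks the same base-62 alphabet most-significant digit first; a hypothetical companion helper:

def idToShortURL(id):
    alphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
    if id == 0:
        return alphabet[0]
    digits = []
    while id > 0:
        digits.append(alphabet[id % 62])
        id //= 62
    # Digits come out least-significant first, so reverse them.
    return "".join(reversed(digits))

Round-tripping the result through shortURLToId should return the original ID.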
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _shortenUrl(self, url):\n posturi = \"https://www.googleapis.com/urlshortener/v1/url\"\n headers = {'Content-Type' : 'application/json'}\n data = {'longUrl' : url}\n data = json.dumps(data)\n request = urllib2.Request(posturi,data,headers)\n response = urllib2.urlopen(request)\n response_data = response.read()\n shorturi = json.loads(response_data)['id']\n return shorturi",
"def short_url(lastid):\n number = lastid +100000000000\n bs62encoded = base62.encode(number)\n return 'https://abc.com/{id}'.format(id=str(bs62encoded))",
"def get_id_shortlink(link = None):\n choppedLink = legacy_check(link)\n id = None\n try:\n id = choppedLink[3] # or -1 instead of 3\n except:\n pass #dont care bout issues here\n return id",
"def decode(self, shortUrl):\n cleanedID = shortUrl[len(self.baseUrl)+len(self.prefix):]\n long_URL = self.storage[cleanedID]\n return long_URL",
"def id_from_url(url):\n return url.split('-')[-1].split('.html')[0]",
"def encode(shorturl_id: int) -> str:\n short_resource = []\n while shorturl_id > 0:\n character_index = shorturl_id % BASE\n short_resource.append(CHARACTER_SPACE[character_index])\n shorturl_id //= BASE\n return \"\".join(short_resource[::-1])",
"def get_id(self, url):\n return url.split('/')[-1]",
"def _short_id(video_id):\n return '-'.join(video_id.split('-')[0:2])",
"def gen_shorter_url(long_url):\n if long_url in URL_PAIR_STORE.long_url:\n return URL_PAIR_STORE.short_url[\n URL_PAIR_STORE.long_url == long_url]\n else:\n short_url = DOMAIN_NAME + '/' + do_hashing(long_url)\n new_entry = URLPair(\n id=gen_unique_id(),\n long_url=long_url,\n short_url=short_url,\n )\n insert_new_pairs(new_entry)\n return short_url",
"def long_to_short(self, url, url_mobile=None, url_tablet=None):\n\n temp_short = uuid4() #temporary short code so we can get lastworid after insert\n query = 'INSERT into urls(short,default_url,mobile_url,tablet_url) VALUES (\"{short}\",\"{url}\",\"{mobile}\",\"{tablet}\");'.\\\n format(short=temp_short, url=url,\n mobile=url_mobile, tablet=url_tablet)\n with sq.connect(self.DB) as conn:\n cursor = conn.cursor()\n try:\n cursor.execute(query)\n url_id = cursor.lastrowid + 1\n based_id = base36.encode(url_id)\n #Update to the definitive short url\n update_query = 'UPDATE urls SET short = \"{new_short}\" WHERE short = \"{temp_uuid}\";'.\\\n format(new_short=based_id, temp_uuid=temp_short)\n cursor.execute(update_query)\n return based_id\n except sq.OperationalError:\n print(\"ERROR\")\n return False\n except ValueError:\n return False",
"def _id_from_url(url):\n url = re.sub(r'\\?.*', '', url)\n video_id = url.split('/')[-2]\n return video_id",
"def decode(self, shortUrl: str) -> str:\n l = shortUrl\n \n tmp = l[-1]\n del l[-1]\n s=l[0]+\"//\"\n i = 2\n while i<len(l):\n s+=l[i]+\"/\"\n i+=1\n \n return s[:-1]",
"def return_shorter_url(url):\n # found out that the entries were coming over in this format: <http://www.someurl.com>\n full_url = f\"https://www.googleapis.com/urlshortener/v1/url?key={API_KEY}\"\n fixed_url = remove_chars.clean_text(url)\n payload = {\"longUrl\": fixed_url}\n headers = {\"content-type\": \"application/json\"}\n # making a post to google API\n r = requests.post(full_url, data=json.dumps(payload), headers=headers).json()\n return f\"Short URL: {r['id']}\"",
"def __create_short_url(self):\n last_short_url = Url.__load_last_short_url()\n short_url = self.__increment_string(last_short_url)\n Url.__save_last_short_url(short_url)\n return short_url",
"def retrieve(short_id):\n try:\n url = Url.get(short_id)\n\n url.update(actions=[\n url.hits.set(url.hits + 1),\n url.lastHit.set(datetime.utcnow())\n ])\n\n return jsonify({\n \"statusCode\": 301,\n \"location\": url.longURL\n })\n\n except:\n return jsonify({\"Error\", \"No Such ID\"})",
"def decode(self, shortUrl):\n v = shortUrl[20:len(shortUrl)]\n return (self.hash[int(v)])",
"def short_id(self):\n if self.short_id_missing:\n return \"0\" * settings.ID_LENGTH\n return str(self.id)[0:settings.ID_LENGTH]",
"def decode(self, shortUrl: str) -> str:\n url = shortUrl.split('/')[-1]\n idx = int(url)\n \n return self.reverse_map[idx]",
"def get_row_id_for_short_url(url):\n try:\n return short_url.decode_url(url)\n except:\n return -1",
"def id_from_url(url: str) -> str:\n parts = RedditBase._url_parts(url)\n try:\n comment_index = parts.index(\"comments\")\n except ValueError:\n raise InvalidURL(url) from None\n\n if len(parts) - 4 != comment_index:\n raise InvalidURL(url)\n return parts[-1]",
"def encode(self, longUrl):\n shortUrl = \"http://tinyurl.com/\" + str(hash(longUrl))\n self.decode_map[shortUrl] = longUrl\n return shortUrl",
"def decode(self, shortUrl):\n shortUrl = shortUrl[-6:]\n if shortUrl in self.short_to_long:\n return self.short_to_long[shortUrl]",
"def extract_id(url):\n trail_id = url.replace('https://www.trailforks.com/trails/','').replace('/','')\n return trail_id",
"def decode(short_url: str) -> int:\r\n result = 0\r\n for c in short_url:\r\n result = BASE * result + CODEX.find(c)\r\n return result",
"def shorten_id(id):\n if id.startswith('CN'):\n id = id[2:]\n if not id[-1].isdigit():\n id = id[:-1]\n return id",
"def _e_to_id(self, e):\n return (e.attrib['href']\n [(e.attrib['href']\n .rfind('/id')+3):]\n .replace('?mt=2', ''))",
"def decode(self, shortUrl: str) -> str:\n return self.lookup[shortUrl]",
"def encode(self, longUrl: str) -> str:\n ans = \"http://tinyurl.com/\" + hex(abs(hash(longUrl)))\n self.lookup[ans] = longUrl\n return ans",
"def get_clean_url(url, unique_id):\n search = f\"(.*{unique_id})\"\n return re.findall(search,url)[0]",
"def get_id_regular_link(link = None):\n #Legacy compatibility\n choppedLink = legacy_check(link)\n # dont bother if we are none.\n if link == None:\n return link\n\n vid_url_params = choppedLink[3].split(\"&\")\n # Search the id in the list of elements of the url\n vid = search_video_id(vid_url_params)\n\n # And dont forget the links with hashtags #\n vid = vid.split(\"#\")[0]\n\n return vid # change this var names TODO"
] |
[
"0.76266074",
"0.75862736",
"0.71807855",
"0.71714944",
"0.7136487",
"0.7108624",
"0.70438015",
"0.7000727",
"0.6966817",
"0.69377244",
"0.6835616",
"0.6831397",
"0.6816086",
"0.67460054",
"0.668461",
"0.6674245",
"0.6609759",
"0.66091233",
"0.65995246",
"0.65338206",
"0.65143776",
"0.6500559",
"0.6489143",
"0.6420655",
"0.6418576",
"0.64064986",
"0.6406259",
"0.63833284",
"0.6381516",
"0.6341076"
] |
0.86196995
|
0
|
probably the worst way to parse this captcha
|
def get_captcha_reply(captcha):
def get_char_at(pos, captcha):
char_chars = [line[pos-1:pos] for line in captcha.split(b'\n')]
key = ''.join([ str(s, 'ascii') for s in char_chars])
if key == ' | ':
return get_char_at(pos+2, captcha)
if key == ' | .\\ ':
return get_char_at(pos+2, captcha)
return chars[key]
pos = 1
a, size = get_char_at(pos, captcha)
pos += size
pwn.log.info("a=%d" % a)
op, size = get_char_at(pos, captcha)
pos += size
pwn.log.info('op=%s' % op)
b, size = get_char_at(pos, captcha)
pos += size
pwn.log.info('b=%d' % b)
if op == '-':
return a - b
if op == '*':
return a * b
if op == '/':
return a / b
if op == '+':
return a + b
pwn.log.error("Ops not found (%s)" % op)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_captcha(self):\n res = self._limited_call(self._requests.get,\n constants.FA_ROOT + \"/captcha.jpg\")\n data = res.content\n return data",
"def handle_captcha(self):\n self.webdriver.save_screenshot('./out/captcha.png')\n sleep(20)\n\n with open('./out/captcha', 'r') as f:\n try:\n self.webdriver.find_element_by_xpath(\"//input[@aria-label='Type the text you hear or see']\").send_keys(f.read())\n except:\n log.error('Captcha input failed. Possibly incorrect captcha?')\n raise\n\n self.webdriver.find_element_by_xpath('//*[@id=\"identifierNext\"]').click()\n sleep(4)\n\n self.webdriver.find_element_by_css_selector(\"input[type=password]\").send_keys(self.bot.getPassword())",
"def get_captcha_challenge(http_body, \n captcha_base_url='http://www.google.com/accounts/'):\n contains_captcha_challenge = False\n captcha_parameters = {}\n for response_line in http_body.splitlines():\n if response_line.startswith('Error=CaptchaRequired'):\n contains_captcha_challenge = True\n elif response_line.startswith('CaptchaToken='):\n # Strip off the leading CaptchaToken=\n captcha_parameters['token'] = response_line[13:]\n elif response_line.startswith('CaptchaUrl='):\n captcha_parameters['url'] = '%s%s' % (captcha_base_url,\n response_line[11:])\n if contains_captcha_challenge:\n return captcha_parameters\n else:\n return None",
"def parse_captcha_string(captcha_string: str):\n try:\n if captcha_string.find('?') != -1:\n captcha_string = captcha_string[:captcha_string.find('?')]\n list_digits = captcha_string.split('+')\n if list_digits[1] == '':\n return None\n if int(list_digits[1]) > 25:\n list_digits[1] = list_digits[1][0]\n\n except (ValueError, IndexError) as error:\n print('Cant recognize captcha')\n print(error)\n else:\n return int(list_digits[0]) + int(list_digits[1])",
"def get_captcha_image(self, page_html) -> str:\n try:\n items = page_html.select('div[class=\"ddText\"]')\n result_items = re.findall(r'\\\"data:image.*\\\"', str(items[0]))\n result_items = str(result_items).replace(\"\\\"\", \"\")\n except Exception as e:\n raise e\n else:\n return result_items",
"def extractCaptcha(x, y, nameInfix=None, debug=False):\n\n\tif nameInfix == None:\n\t\tcaptchaName = \"./captcha/captcha_\" + str(datetime.datetime.now().isoformat()) + \".png\"\n\telse:\n\t\tcaptchaName = \"./captcha/captcha_\" + str(nameInfix) + \".png\"\n\n\treturn extractScreenPart(x-50, y+5, 170, 60, name=captchaName, debug=debug)",
"def _handle_verify_code(self):\n while True:\n # r = self.session.get(self._genimage_url.format(code=self.codestring))\n try:\n self.headers[\"Cookie\"] = \"__jsluid=%s; __jsl_clearance=%s; JSESSIONID=%s\" % (self._jsluid, self._jsl_clearance, self.jsessionid)\n vfcode_url = \"http://www.miitbeian.gov.cn/getVerifyCode?%s\" % random.randint(10, 90)\n logger.info(\"Downloading verification code pic: %s\", vfcode_url)\n request = urllib2.Request(vfcode_url,headers=self.headers)\n r = self.opener.open(request, timeout=20)\n s = r.read()\n for cookie in self.cookiejar:\n logger.info(\"Get Cookie step2: %s, %s\", cookie.name, cookie.value)\n if cookie.name == \"JSESSIONID\":\n self.jsessionid = cookie.value\n img_path = \"miitVerf/code.png\"\n with open(img_path, mode='wb') as fp:\n fp.write(s)\n fp.close()\n logger.info(\"Saved verification code to %s\", format(os.path.dirname(img_path)))\n break\n except Exception,e:\n logger.info(e)\n self.vcode = raw_input(\"Please input the captcha:\\n\")\n return self.vcode",
"def decoding_the_captcha(captcha, l1=7):\n im = Image.open(captcha)\n im = im.convert(\"RGB\")\n p1 = im.load()\n\n # Filtering the black dots\n for x in range(im.size[0]):\n for y in range(im.size[1]):\n if (p1[x, y][0] < l1) and (p1[x, y][1] < l1) \\\n and (p1[x, y][2] < l1):\n p1[x, y] = (0x80, 0x80, 0x80, 255)\n\n im.save(\"output.png\")\n im.close()",
"def captcha(self):\n notification.send_sms(message=message)\n notification.send_emails(emails=email, message=message)\n sleep(25)\n\n ### this code snippet is for reference only, not to be used ###\n # sleep(3)\n # captcha = self.driver.find_element_by_xpath('/html/body/div/iframe[0]')\n # self.driver.switch_to.frame(captcha)\n # captcha_loc = captcha.location\n # print(captcha_loc)\n # captcha_x = captcha_loc[\"x\"]\n # captcha_y = captcha_loc[\"y\"]\n # self.actions.tap_and_hold(captcha_x, captcha_y)\n # sleep(5)\n # self.actions.release(captcha_x, captcha_y)\n # self.search_input()",
"def bypass_captcha(self, rps):\n viewstate_pattern = r\"id=\\\"__VIEWSTATE\\\".*\\\"(.*)\\\"\"\n viewstategenerator_pattern = r\"id=\\\"__VIEWSTATEGENERATOR\\\".*\\\"(.*)\\\"\"\n CAPTCHA_PATTERN = r\"id=\\\"ctl00_ContentPlaceHolder1_ctl00_lblCapcha\\\".*?>(.*?)<\\/span>\"\n viewstate = re.search(viewstate_pattern, rps)\n if viewstate:\n viewstate = viewstate.group(1)\n else:\n print(\"VIEWSTATE value not found!\")\n viewstategenerator = re.search(viewstategenerator_pattern, rps)\n if viewstategenerator:\n viewstategenerator = viewstategenerator.group(1)\n captcha = re.search(CAPTCHA_PATTERN, rps)\n if captcha:\n captcha_text = captcha.group(1)\n print(\"[*] CAPTCHA -> [{}]\".format(captcha_text))\n payload = {\n 'ctl00$ContentPlaceHolder1$ctl00$txtCaptcha':captcha_text,\n '__VIEWSTATE':viewstate,\n '__VIEWSTATEGENERATOR':viewstategenerator,\n '__EVENTARGUMENT':'',\n '__EVENTTARGET':'',\n 'ctl00$ContentPlaceHolder1$ctl00$btnXacNhan': 'Vào website'\n }\n rps = self.session.post(url = home_url, headers = BROWSER_HEADERS, data=payload)\n if CAPTCHA_ELEMENT_ID not in rps.text:\n print(\"[*] CAPTCHA BYPASSED\")\n return True\n else:\n print(\"CAPTCHA NOT BYPASSED! PLEASE REPORT TO DEVELOPER BACHVKHOA!\")\n else:\n print(\"[*] CAPTCHA NOT FOUND\")\n return False",
"def parse(self, response):\n if self._has_captcha(response):\n result = self._handle_captcha(response, self.parse)\n else:\n result = super(AmazonBaseClass, self).parse(response)\n\n return result",
"def get_sms_captcha(self, img_ts, img_captcha):\n url = \"http://api.applezhuan.com/api/get_sms_captcha?&\"\n params = {\n \"img_captcha\": img_captcha,\n \"time\": self.get_current_time,\n \"ts\": img_ts,\n \"device_code\": self.device_code,\n \"mobile\": self.mobile.mobile\n }\n params_str = self.encrypt.get_secret_param(params)\n url = url + \"s=\" + params_str\n headers = {\n \"Accept-Language\": \"zh-CN,zh;q=0.8\",\n \"User-Agent\": \"Mozilla/5.0 (Linux; U; Android \" + self.mobile.os + \"; zh-cn; GT-N7100 Build/\" +\n self.mobile.brand + \") AppleWebKit/534.30 (KHTML, like Gecko) \"\n \"Version/4.0 Mobile Safari/534.30\",\n \"Host\": \"api.applezhuan.com\",\n \"Connection\": \"Keep-Alive\",\n \"Accept-Encoding\": \"gzip\",\n \"Cookie\": self.cookie\n }\n\n res = requests.get(url, headers=headers)\n # print(res.text)\n result = json.loads(res.text)\n return result",
"def _hackBotchedCard(self, card, res):\n\t\tmat = re.match(r\"([^\\s=]*)\\s*=\\s*([^/]+)\", card.cardimage)\n\t\tif mat:\n\t\t\tres[mat.group(1)] = mat.group(2).strip()\n\t\telse: # Card beyond recognition, ignore\n\t\t\tpass",
"def solve_image_captcha(self, captcha_tmp_path):\n # Get solution and apply it\n for i in range(1, 4):\n print(f\"Attempt #{i} for recaptcha solution\")\n solution = self.obtain_image_captcha(captcha_tmp_path)\n print(f'this {solution}')\n if solution and ERROR not in solution.upper():\n break\n\n if solution is None or ERROR in solution.upper():\n if not solution:\n message = f\"2Captcha service didn't return a response for the captcha\"\n else:\n message = f\"Error in captcha solution from 2Captcha: {solution}\"\n return None\n\n print(\"Captcha solution: {}\".format(solution))\n return solution",
"def handle_captcha(thread_call, thread_r):\n import subprocess\n\n iden = thread_r['captcha']\n\n subprocess.call(['open', reddit_url + 'captcha/' + iden])\n thread_call['captcha'] = input(\"Captcha (enclose in quotes):\")\n thread_call['iden'] = iden\n\n request = session.post(reddit_url + 'api/submit', data=thread_call, cookies=cookie)\n thread_r = request.json()['json']['data']\n print request.json()\n if len(thread_r['errors']) > 0:\n debug_printer.pprint(thread_r)",
"def solve_captcha(self):\n # Switch to the Captcha's iframe\n captcha = CapatchaSolver(self.driver)\n while True:\n self.driver.switch_to.frame(self.driver.find_element_by_tag_name(\"iframe\"))\n captcha.solve_captcha()\n # Check if we passed the captcha part by checking the page title\n wait = WebDriverWait(self.driver, 10)\n try:\n wait.until_not(EC.title_is(consts.BLOCKED))\n break\n except TimeoutException:\n self.driver.refresh()",
"def solve_captcha_manual(gid):\n image = auth.get_captcha_image(gid)\n # FIXME: Use Python's temp file interface.\n image.save(\"./test.png\")\n webbrowser.open_new_tab(\"./test.png\")\n text = input('solve_captcha --->')\n return text",
"def handle_verify_code(self, code):\n r = self.session.get(self.image_url_format.format(code=code))\n\n # FIXME use terminal better\n img_path = os.path.expanduser('~/') + 'pansh.{}.vcode.png'.format(hash(self.username))\n with open(img_path, mode='wb') as fp:\n fp.write(r.content)\n print(\"Saved verification code to {}\".format(os.path.dirname(img_path)))\n vcode = raw_input(\"Please input the captcha:\\n\")\n return vcode",
"def get_image_response(self, captcha_id):\n url = 'http://2captcha.com/res.php'\n data = {'key': self.api_key, 'action': 'get',\n 'id': captcha_id, 'json': 1}\n response = self.session.post(url, data=data)\n json_response = json.loads(response.text)\n recaptcha_answer = json_response[\"request\"]\n finished = False\n for _ in range(20): # For making up to 120 seconds of waits\n if 'CAPCHA_NOT_READY' not in response.text:\n finished = True\n break\n # Time Requested by the web page\n sleep(6)\n response = self.session.post(url, data=data)\n json_response = json.loads(response.text)\n recaptcha_answer = json_response[\"request\"]\n\n if not finished:\n return False\n\n return recaptcha_answer",
"def writerep_general(contact_link, i):\n\n b = browser.Browser()\n print \"In writerep_general, opening contact_link\", contact_link\n b.open(contact_link)\n\n def get_challenge():\n ''' find captchas'''\n labels = b.find_nodes('label', lambda x: x.get('for') == 'HIP_response')\n if labels: return labels[0].string\n \n def fill_inhofe_lgraham(f):\n \"\"\"special function to fill in forms for inhofe and lgraham\"\"\"\n if DEBUG: print \"Filling special inhofe or lgraham form\"\n f.fill_all(A01=i.prefix, B01=i.fname, C01=i.lname, D01=i.addr1, E01=i.addr2, F01=i.city,\n G01=i.state, H01=i.zip5, H02=i.phone, H03=i.phone, I01=i.email, J01=\"Communications\", K01=i.full_msg)\n f.fill(type='textarea', value=i.full_msg)\n if DEBUG: print \"f filled and ready to submit: \", f\n \n def fill_form(f):\n ''' f is a form '''\n\n f.fill_name(i.prefix, i.fname, i.lname)\n if DEBUG: print \"in fill_form, filling addr\"\n f.fill_address(i.addr1, i.addr2)\n if DEBUG: print \"in fill_form, filling phone\"\n f.fill_phone(i.phone)\n if DEBUG: print \"in fill_form, filling textarea\"\n textareacontrol = f.fill(type='textarea', value=i.full_msg)\n if DEBUG: print 'filled textareacontrol' , textareacontrol\n if DEBUG: print \"in fill_form, filling all\"\n\n if DEBUG: print \"Printing all controls\"\n for c in f.controls:\n if DEBUG: print \"control: \", c.name, \" type: \", c.type\n \n f.fill_all(city=i.city, zipcode=i.zip5, zip4=i.zip4, state=i.state.upper(),\n email=i.email,\n issue=['TECH', 'GEN', 'OTH'],\n subject=i.subject, reply='yes',\n Re='issue', #for billnelson\n newsletter='noAction', aff1='Unsubscribe',\n MessageType=\"Express an opinion or share your views with me\")\n\n # page has one required control that has no name. so we need to fill it in\n if (i.dist == 'SD-00' or 'coburn' in b.url):\n empty_controls = [c for c in f.controls if not c.value]\n for c in empty_controls:\n if DEBUG: print f.fill('OTH', control=c)\n\n \n\n\n # Solve captchas. I included this here because it was placed here by Aaron,\n # but I haven't found a captcha that it works on. -NKF\n challenge = get_challenge()\n if challenge:\n print \"Found challenge!\"\n try:\n solution = captchasolver.solve(challenge)\n except Exception, detail:\n print >> sys.stderr, 'Exception in CaptchaSolve', detail\n print >> sys.stderr, 'Could not solve:\"%s\"' % challenge,\n \n if DEBUG: print \"f filled and ready to submit to \", b.url, \"\\n\", f\n #return b.open(f.click())\n \n \n\n # max loops\n k = 6\n\n # needed this from some weird error that I forgot to document.\n # we only want to do the WYR form once,\n # so it's a flag so we don't choose this one again. 
\n completedWyrForm = False\n for cnt in range(1,k):\n # todo, place newurl into cache\n if DEBUG: print \"Loop \", cnt, \":\\n\", b.url, \"\\n\" #, b.page, \"\\n Done with page \", cnt, \"\\n\\n\"\n\n # check if this is a refresh page\n # to do: see if we can get javascript window.location refreshes\n # (would require some smart parsing or using a javascript interpreter module)\n if 'http-equiv=\"refresh\"' in b.page:\n if DEBUG: print \"Redirect to a new page:\"\n newurl = r_refresh.findall(b.page)[0]\n newurl = newurl.replace(' ', '%20')\n newurl = newurl.replace('&', '&')\n if DEBUG: print \"\\nNewurl:\", newurl\n try:\n b.open(newurl)\n continue #next loop\n except:\n print \"Failed to open url \", newurl, \" error: \", traceback.print_exc()\n\n # some pages have multiple forms on them.\n # For example, there may be a search tool in the sidebar.\n # or there may be forms which are hidden by not displayed by the css.\n # try to see what we can grab out the page, then we'll decide which one's the best to try\n textareaform = get_form(b, lambda f: f.find_control_by_type('textarea'))\n zipform = get_form(b, lambda f: f.has(name='zip'))\n verificationform = get_form(b, lambda f: 'formproc' in f.action)\n nameform = get_form(b, lambda f: 'wrep_const' in f.action) #see AL-06 for an example, has zip form in page too\n wyrform = get_form(b, lambda f: f.find_control_by_id('state') and f.find_control_by_name('zip') and f.find_control_by_name('zip4')) #get_form(b, not_signup_or_search)\n indexform = get_form(b, lambda f: f.has(name='Re')) # see billnelson for example\n\n #choose which form we want to use\n form = None\n if textareaform:\n if DEBUG: print \"textareaform\"\n form = textareaform\n elif wyrform and not completedWyrForm:\n if DEBUG: print \"wyrform\"\n form = wyrform\n completedWyrForm = True\n elif nameform:\n if DEBUG: print \"step2 contact form with name\"\n form = nameform\n elif zipform:\n if DEBUG: print \"zipform\"\n form = zipform\n elif verificationform:\n if DEBUG: print \"verification form\"\n form = verificationform\n elif indexform:\n if DEBUG: print \"index form\"\n form = indexform\n\n #if no redirect and no form was found, just return. 
can go no further\n if not form:\n return b.page\n \n \n #to do, add back in captcha solver\n if form.find_control_by_name('captcha') or form.find_control_by_name('validation'):\n if DEBUG: print \"captcha found\"\n #raise Captcha\n return b.page\n else:\n if DEBUG: print \"no captcha found\"\n\n #try:\n if DEBUG: print \"going to fill_form from \", b.url, \" now \\n\", form, \"\\n End form\", cnt, \"\\n\"\n if \"inhofe\" in contact_link or \"lgraham\" in contact_link:\n fill_inhofe_lgraham(form)\n else:\n fill_form(form) #, aggressive=True)\n\n try:\n nextpage = b.open(form.click())\n except:\n print \"caught an http error\"\n print \"Failed to submit form for url \", b.url, \" error: \", traceback.print_exc()\n return \"Failed to submit form for url \"+ b.url+ \" error: \"+ traceback.format_exc()\n\n \n # Now, look for common errors or confirmations.\n foundError = False\n thanked = False\n if DEBUG: print \"Looking for errors in page \" #, b.page\n \n errorStr = getError(b.page)\n if errorStr:\n if DEBUG: print \"Found error: \", errorStr, \" done with \", contact_link\n foundError = True\n\n if DEBUG: print \"Looking for thank you in page: \"# , nextpage.lower()\n confirmations=[cstr for cstr in confirmationStrings if cstr in nextpage.lower()]\n\n if len(confirmations) > 0:\n print 'thanked, done with ', contact_link\n thanked = True\n\n successUrls = ['https://mulvaneyforms.house.gov/submit-contact.aspx']\n if b.url in successUrls:\n thanked = True\n\n if thanked or foundError:\n return nextpage\n\n if DEBUG: print \"Tried \", k, \"times, unsuccessfully, to fill form\"\n return b.page\n #raise UnsuccessfulAfter5Attempts(b.page) ",
"def receive_capturing_validation(self):\n reply = self.socket.recv(1)\n if reply[0] == codes['timeout']:\n print(\"Ocurrió un timeout en la conexión\")\n self.close_connection()\n if bytes_to_int(reply) == codes['already_have_all']:\n print(\"Ya tenías todos los pokémones. Has completado el juego.\")\n self.receive_session_termination()\n\n elif bytes_to_int(reply) == codes['already_have_pokemon']:\n print(\"Ya tienes el pokémon sugerido. Intentaré encontrarte otro.\")\n self.receive_pokemon_suggestion()\n\n elif bytes_to_int(reply) == codes['do_not_have_pokemon']:\n print(\"Tu pokédex no reconoce a este pokémon. Intenta capturarlo!\")\n captured = False\n while not captured:\n captured = self.verify_capture()\n if captured:\n break\n again = \"\"\n while again != \"y\" and again != \"n\":\n again = input(\"Quieres tratar de nuevo? (y/n): \")\n if again == \"n\":\n self.socket.sendall(pack('B', codes['no']))\n self.receive_session_termination()\n elif again == \"y\":\n self.socket.sendall(pack('B', codes['yes']))\n if captured:\n print(\"Lo capturaste\")\n self.receive_image()\n self.receive_session_termination()",
"def getTasseledCap(img):",
"def corp_image(self):\n try:\n # Open image\n image_to_crop = Image.open(self.captcha_image_filename, 'r')\n # Crop image\n image = image_to_crop.crop((-1, 8, 65, 22))\n # Save image\n image.save(self.cropped_captcha_filename)\n except UnidentifiedImageError as error:\n raise(error)",
"def split_dotted_f(captcha):\n # Cropping captcha so that the first letter (f) is not included\n image = captcha[19:46, 36:]\n \n col_sum = np.sum(image, axis = 0)\n col_sum_list = list(col_sum)\n\n # Finding all the dark regions\n # beggining and end of all dark regions)\n x = 1\n i = 0\n dark_regions = []\n while i < 164:\n if col_sum_list[i] == 0:\n dark_region_beg = i\n while col_sum_list[i + x] == 0:\n x = x + 1\n if (x + i) > 163:\n break\n dark_region_end = i + x - 1\n dark_region = (dark_region_beg, dark_region_end)\n dark_regions.append(dark_region)\n i = x + i + 1\n x = 1\n else:\n i = i + 1\n\n # Identifying leftmost and rightmost dark regions and popping them out of the list\n left_region = dark_regions[0]\n right_region = dark_regions[-1]\n dark_regions.pop(0)\n dark_regions.pop(-1)\n\n # Sorting dark regions according to their length\n four_regions = sorted(dark_regions, key = lambda x: x[1] - x[0], reverse = True)\n\n gaps = []\n lines = []\n for i, region in enumerate(four_regions):\n gap = mt.ceil((region[1] - region[0]) / 2)\n if gap == 0:\n continue\n gaps.append(gap)\n lines.append(region[0] + gap)\n\n # If more than 4 remaining gaps are identified, the problem may be due to split letters\n # Some of the troublesome letters are m, n and h\n # We will try to fix this issue by completing gaps in these letters\n if len(lines) > 4:\n\n for i in range(len(col_sum_list[:-9])):\n if col_sum_list[i:i+9] == [0, 0, 0, 0, 510, 510, 0, 3060, 3060]:\n captcha[28:30, i+1:i+3] = 255\n if col_sum_list[i:i+9] == [0, 0, 0, 0, 510, 510, 0, 2550, 2550]:\n captcha[31:33, i+1:i+3] = 255\n if col_sum_list[i:i+9] == [0, 3060, 3060, 0, 510, 510, 0, 0, 0, 0]:\n captcha[28:30, i+7:i+9] = 255\n if col_sum_list[i:i+9] == [0, 2550, 2550, 0, 510, 510, 0, 0, 0, 0]:\n captcha[31:33, i+7:i+9] = 255\n if col_sum_list[i:i+9] == [0, 4080, 4080, 0, 0, 0, 0, 510, 510]:\n captcha[31:33, i+4:i+6] = 255\n\n # Reloading image (based on modified captcha) and redefiding col_sum_list\n image = captcha[19:46, 36:]\n col_sum_list = list(np.sum(image, axis = 0))\n\n # Finding all the dark regions\n # beggining and end of all dark regions)\n x = 1\n i = 0\n dark_regions = []\n while i < 164:\n if col_sum_list[i] == 0:\n dark_region_beg = i\n while col_sum_list[i + x] == 0:\n x = x + 1\n if (x + i) > 163:\n break\n dark_region_end = i + x - 1\n dark_region = (dark_region_beg, dark_region_end)\n dark_regions.append(dark_region)\n i = x + i + 1\n x = 1\n else:\n i = i + 1\n\n # Identifying leftmost and rightmost dark regions and popping them out of the list\n left_region = dark_regions[0]\n right_region = dark_regions[-1]\n dark_regions.pop(0)\n dark_regions.pop(-1)\n\n # Sorting dark regions according to their length\n four_regions = sorted(dark_regions, key = lambda x: x[1] - x[0], reverse = True)\n\n # Building a list of GAPS (lengths of the dark regions)\n # and LINES that split such gaps in half\n gaps = []\n lines = []\n for i, region in enumerate(four_regions):\n gap = mt.ceil((region[1] - region[0]) / 2)\n if gap == 0:\n continue\n gaps.append(gap)\n lines.append(region[0] + gap)\n\n # If the errors persists, we move on to next captcha\n if len(lines) > 4:\n return('error')\n\n # If the algorithm finds less letters than expected (merged letters), we move on to next captcha\n if len(lines) < 4:\n return('error')\n\n # Defining rightmost and leftmost lines, appending lines list, and sorting\n left_line = 0\n right_line = right_region[0] + 2\n lines.append(left_line)\n lines.append(right_line)\n lines = 
sorted(lines)\n\n # Adjusting coordinates to account for deleting first letter\n lines = list(map(lambda x: x + 36, lines))\n\n # Finding letters x-coordinates (coordinates for initial r are already included)\n letters_xcoords = [(26, 37)]\n for i in range(len(lines)):\n if lines[i] == lines[-1]:\n break\n letter = (lines[i], lines[i + 1])\n letters_xcoords.append(letter)\n\n # Finding letters in the captcha, using the x-coordinates\n letters = []\n for i, letter in enumerate(letters_xcoords):\n letter_image = captcha[:60, letter[0]:letter[1]]\n letters.append(letter_image)\n\n return(letters)",
"def captcha_validation(token: str):\n url = \"https://www.google.com/recaptcha/api/siteverify\"\n secret = json.loads(get_secret(\"CAPTCHA_SECRET\"))['CAPTCHA_SECRET']\n payload = {\n \"secret\": secret,\n \"response\": token\n }\n response_raw = requests.post(url, data=payload)\n response_text = response_raw.text\n logger.debug(response_text)\n response = json.loads(response_text)\n return response['success']",
"def generate_challenge(self):\n return None",
"def _validate_captcha(data):\n settings = api.config.get_settings()[\"captcha\"]\n\n post_data = urllib.parse.urlencode(\n {\n \"secret\": settings[\"reCAPTCHA_private_key\"],\n \"response\": data[\"g-recaptcha-response\"],\n \"remoteip\": flask.request.remote_addr,\n }\n ).encode(\"utf-8\")\n\n request = urllib.request.Request(settings[\"captcha_url\"], post_data, method=\"POST\")\n response = urllib.request.urlopen(request).read().decode(\"utf-8\")\n parsed_response = json.loads(response)\n return parsed_response[\"success\"] is True",
"def parse_kiss(self):\n frame_len = len(self.frame)\n\n if frame_len < 16:\n self._logger.debug('Frame len(%s) < 16, Exiting.', frame_len)\n return\n\n for raw_slice in range(0, frame_len):\n\n # Is address field length correct?\n # Find the first ODD Byte followed by the next boundary:\n if (ord(self.frame[raw_slice]) & 0x01\n and ((raw_slice + 1) % 7) == 0):\n\n i = (raw_slice + 1) / 7\n\n # Less than 2 callsigns?\n if 1 < i < 11:\n # For frames <= 70 bytes\n if frame_len >= raw_slice + 2:\n if (ord(self.frame[raw_slice + 1]) & 0x03 == 0x03 and\n ord(self.frame[raw_slice + 2]) in\n [0xf0, 0xcf]):\n self._extract_kiss_text(raw_slice)\n self._extract_kiss_destination()\n self._extract_kiss_source()\n self._extract_kiss_path(i)",
"def ocr_correction(token):",
"def obtain_image_captcha(self, file_path):\n id_answer = self.post_image_task(file_path)\n if not id_answer:\n message = f\"Unable to obtain response for request of captcha from 2Captcha\"\n print(message)\n return None\n\n try:\n captcha_id = int(id_answer)\n except ValueError:\n message = f\"Error in captcha request from 2Captcha: {id_answer}\"\n print(message)\n return None\n\n recaptcha_answer = self.get_image_response(captcha_id)\n if not recaptcha_answer:\n message = f\"Unable to obtain response for captcha image solution from 2Captcha\"\n print(message)\n return None\n\n print(f\"Output from 2Captcha {recaptcha_answer}\")\n return recaptcha_answer"
] |
[
"0.62265563",
"0.6219252",
"0.61916035",
"0.6096624",
"0.60874146",
"0.59667766",
"0.59021354",
"0.58705884",
"0.5825633",
"0.5809599",
"0.57755667",
"0.5746426",
"0.5594208",
"0.5538595",
"0.55344874",
"0.55010104",
"0.5449904",
"0.53967845",
"0.53909343",
"0.5288578",
"0.5287296",
"0.5286236",
"0.52829856",
"0.5254987",
"0.5246941",
"0.5169091",
"0.5159369",
"0.5097183",
"0.5085489",
"0.5080703"
] |
0.67830896
|
0
|
The Amazon Resource Name (ARN) of the custom platform to use with the environment.
|
def platform_arn(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "platform_arn")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def platform_arn(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"platform_arn\")",
"def platform(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"platform\")",
"def platform():\n return \"micaz\"",
"def PlatformName():\n if override_platform_name:\n return override_platform_name\n if IsWindows():\n return 'win32'\n if IsLinux():\n return 'linux'\n if IsMac():\n return 'mac'\n raise NotImplementedError('Unknown platform \"%s\".' % sys.platform)",
"def product(self):\n return self.appName",
"def name(cls):\n\n system = platform.system()\n\n # Apply system map\n if system in NAME_MAP:\n system = NAME_MAP[system]\n\n return system",
"def GetOSName():\n return Config.osName_",
"def environment_label(self) -> str:\n return self._environment_label",
"def platform_config_filename(region, account_prefix, prod):\n return 'infra/platform-config/%s/%s/%s.json' % (\n account_prefix, \"prod\" if prod else \"dev\", region\n )",
"def GetPlatform(self):\n arch = \"None\"\n # check architecture name\n if \"CMTCONFIG\" in os.environ:\n arch = os.environ[\"CMTCONFIG\"]\n elif \"SCRAM_ARCH\" in os.environ:\n arch = os.environ[\"SCRAM_ARCH\"]\n return arch",
"def brand(self):\n return \"Nest Labs\"",
"def name(self) -> str:\n return f\"{self.platform_name} {self._sensor_name}\"",
"def platform(self):\n return self.random.choice([\n 'Laptop', \n 'Desktop', \n 'Workstation', \n 'Server', \n 'Virtual Machine', \n 'Container', \n 'Micro-Service', \n 'Droplet', \n 'SaaS'\n ])",
"def os_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"os_name\")",
"def app_image_config_arn(self) -> Optional[str]:\n return pulumi.get(self, \"app_image_config_arn\")",
"def application_arn(self) -> Optional[str]:\n return pulumi.get(self, \"application_arn\")",
"def platform_num(self) -> str:\n return pulumi.get(self, \"platform_num\")",
"def architecture(self) -> str:\n return pulumi.get(self, \"architecture\")",
"def architecture(self) -> str:\n return pulumi.get(self, \"architecture\")",
"def application_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_name\")",
"def application_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_name\")",
"def platform(self):\n # type: () -> string_types\n return self._platform",
"def architecture_name(self):\n return get_architecture_name(self.architecture)",
"def get_os() -> str:\n system = platform.system().lower()\n\n if system == \"linux\":\n machine = os.uname().machine\n if machine.startswith(\"arm\") or machine.startswith(\"aarch\"):\n system = \"pi\"\n\n return system + \"_\" + platform.architecture()[0]",
"def get_name():\n return config.APP_NAME",
"def platform(self, return_str=True):\n architecture = self.arch(\"docker\")\n host_platform = self.osversion() + \"/\" + architecture\n if return_str:\n return host_platform.lower()\n return self.parse_platform(host_platform)",
"def get_os_name(cls):\n return cls.get_os_type().name",
"def application_name(self) -> Optional[str]:\n return pulumi.get(self, \"application_name\")",
"def environment_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"environment_name\")",
"def arn(self) -> str:\n return pulumi.get(self, \"arn\")"
] |
[
"0.7836344",
"0.6630474",
"0.66273826",
"0.6609992",
"0.6324445",
"0.6285871",
"0.623594",
"0.61513615",
"0.60816205",
"0.60486674",
"0.60041595",
"0.598996",
"0.59880674",
"0.596188",
"0.59613025",
"0.5961109",
"0.5934884",
"0.5923574",
"0.5923574",
"0.59224755",
"0.59224755",
"0.59121084",
"0.58728135",
"0.58551097",
"0.58390915",
"0.58338064",
"0.5812323",
"0.5772314",
"0.5770205",
"0.5742552"
] |
0.77357316
|
1
|
The name of an Elastic Beanstalk solution stack (platform version) to use with the environment.
|
def solution_stack_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "solution_stack_name")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def solution_stack_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"solution_stack_name\")",
"def stackname(self):\n return self.BASE_NAME.format(**self.conf)",
"def stack_name(self) -> str:\n return jsii.get(self, \"stackName\")",
"def env_name(self):\n return f\"{self.project_name}-{self.stage}\"",
"def stack_name(self):\n stack_name = getattr(self, '__stack_name', None)\n if (\n self.args.stack_name and\n not stack_name\n ):\n stack_name = self.args.stack_name\n elif not stack_name:\n stack_name = \"nephoria-stack-\" + str(int(time.time()))\n\n setattr(self, '__stack_name', stack_name)\n return stack_name",
"def name(self):\n return self._env_name",
"def kernel_name():\n return \"python3\"",
"def stack_name(self) -> str:\n return self._values.get(\"stack_name\")",
"def name_tag(resource_name):\n return Join(\"\", [Ref('AWS::StackName'), '-', resource_name])",
"def name(self):\n return get_env_name(self.tool_name,\n self._python,\n self._requirements,\n self._tagged_env_vars)",
"def environment_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"environment_name\")",
"def pipeline_stack_name(self) -> str:\n return self._values.get(\"pipeline_stack_name\")",
"def get_name():\n return config.APP_NAME",
"def environment_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"environment_name\")",
"def environment_label(self) -> str:\n return self._environment_label",
"def stack_name(self) -> typing.Optional[str]:\n return self._values.get(\"stack_name\")",
"def get_egg_name():\n global eggname\n if not eggname:\n version = local('git describe --abbrev=4', capture=True)\n if version:\n version = '%s-%s' % (version, datetime.datetime.today().strftime('%Y%m%d'))\n eggname = APP_NAME + '-%s-py%s.egg' % (version.replace('-', '_'), python_version)\n return eggname",
"def stackname(self):\n raise NotImplementedError",
"def env_name(pre_chars='(', post_chars=')'):\n env_path = builtins.__xonsh_env__.get('VIRTUAL_ENV', '')\n if len(env_path) == 0 and xp.ON_ANACONDA:\n env_path = builtins.__xonsh_env__.get('CONDA_DEFAULT_ENV', '')\n env_name = os.path.basename(env_path)\n if env_name:\n return pre_chars + env_name + post_chars",
"def python_name(self):\n return self.requirement.name",
"def docker_image_tag(self, app):\n return f\"briefcase/{app.bundle}.{app.app_name.lower()}:{app.target_vendor}-{app.target_codename}\"",
"def get_soc_name():\n return get_soc_spec(\"SOC_VERSION\")",
"def _get_deployment_flavor():\n flavor = cfg.CONF.paste_deploy.flavor\n return '' if not flavor else ('-' + flavor)",
"def get_launch_name():\n\n if product_type == \"RHEL7\":\n launch_name = \"Errata-{0}_{1}_{2}_{3}_{4}_{5}CDN\".format(errata_id, product_type, variant, arch, test_level, cdn)\n \n elif product_type == \"RHEL8\":\n launch_name = \"Errata-{0}_{1}_{2}_{3}_{4}CDN\".format(errata_id, product_type, arch, test_level, cdn)\n\n return launch_name",
"def container_image_name(registry, component_name, version):\n if version is None:\n image = component_name + ':dev'\n else:\n image = '%s/%s:%s' % (registry, component_name, version)\n\n return image",
"def stage_name(self) -> str:\n return pulumi.get(self, \"stage_name\")",
"def stage_name(self) -> str:\n return pulumi.get(self, \"stage_name\")",
"def generate_cluster_stack_name(job):\n return 'cluster-%s----%s' % (job.compute_resource.id, job.id)",
"def version_name(self) -> str:\n return pulumi.get(self, \"version_name\")",
"def bucket_dual_stack_domain_name(self) -> str:\n return jsii.get(self, \"bucketDualStackDomainName\")"
] |
[
"0.6850209",
"0.6835407",
"0.6518939",
"0.65049684",
"0.64019746",
"0.6248095",
"0.6167335",
"0.615564",
"0.6142327",
"0.6139697",
"0.6102911",
"0.6087402",
"0.60867625",
"0.60640186",
"0.6010799",
"0.59820807",
"0.592273",
"0.58995575",
"0.5882993",
"0.58522046",
"0.58299625",
"0.5815258",
"0.5814011",
"0.5812235",
"0.57710993",
"0.5765567",
"0.5765567",
"0.5765216",
"0.57643604",
"0.5695329"
] |
0.68761265
|
0
|
Specifies the tier to use in creating this environment. The environment tier that you choose determines whether Elastic Beanstalk provisions resources to support a web application that handles HTTP(S) requests or a web application that handles background-processing tasks.
|
def tier(self) -> Optional[pulumi.Input['EnvironmentTierArgs']]:
return pulumi.get(self, "tier")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def tier(self):\n\n if not hasattr(self, \"_tier\"):\n self._tier = self.opts.get(\"tier\")\n return self._tier",
"def tier(self) -> pulumi.Output[Optional['outputs.EnvironmentTier']]:\n return pulumi.get(self, \"tier\")",
"def set_tier(self, tier):\n self.single_selection_from_static_kendo_dropdown(self.tier_kendo_dropdown_locator, tier)",
"def tier(self) -> Optional[pulumi.Input['InstanceTier']]:\n return pulumi.get(self, \"tier\")",
"def tier(self) -> str:\n return pulumi.get(self, \"tier\")",
"def tier(self) -> str:\n return pulumi.get(self, \"tier\")",
"def tier(self) -> str:\n return pulumi.get(self, \"tier\")",
"def get_tier(self) -> str:\n tier = self.raw_param.get(\"tier\")\n if not tier:\n return \"\"\n\n tierStr = tier.lower()\n if tierStr == CONST_MANAGED_CLUSTER_SKU_TIER_FREE and self._get_uptime_sla(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n 'Cannot specify \"--uptime-sla\" and \"--tier free\" at the same time.'\n )\n\n if tierStr == CONST_MANAGED_CLUSTER_SKU_TIER_STANDARD and self._get_no_uptime_sla(enable_validation=False):\n raise MutuallyExclusiveArgumentError(\n 'Cannot specify \"--no-uptime-sla\" and \"--tier standard\" at the same time.'\n )\n\n return tierStr",
"def tier(self, tier):\n\n self._tier = tier",
"def tier(self) -> Optional[str]:\n return pulumi.get(self, \"tier\")",
"def tier(self) -> Optional[str]:\n return pulumi.get(self, \"tier\")",
"def tier(self) -> Optional[str]:\n return pulumi.get(self, \"tier\")",
"def tier(self) -> Optional[str]:\n return pulumi.get(self, \"tier\")",
"def tier(self) -> Optional[pulumi.Input[Union[str, 'CapacitySkuTier']]]:\n return pulumi.get(self, \"tier\")",
"def tier(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"tier\")",
"def tier(self) -> Optional[pulumi.Input[Union[str, 'VCoreSkuTier']]]:\n return pulumi.get(self, \"tier\")",
"def tier(self):\n return self._tier",
"def AddTier(parser, is_patch=False):\n help_text = (\n \"Machine type for a shared-core instance e.g. ``db-g1-small''. \"\n 'For all other instances, instead of using tiers, customize '\n 'your instance by specifying its CPU and memory. You can do so '\n 'with the `--cpu` and `--memory` flags. Learn more about how '\n 'CPU and memory affects pricing: '\n 'https://cloud.google.com/sql/pricing.'\n )\n if is_patch:\n help_text += ' WARNING: Instance will be restarted.'\n\n parser.add_argument('--tier', '-t', required=False, help=help_text)",
"def access_tier(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"access_tier\")",
"def configure_tiers(self, datacenter, tier):\n print \"Enabling tier %s...\" % tier\n tiers = datacenter.listTiers()\n\n tiers[0].setName(tier)\n tiers[0].update()\n\n for i in range(1, 4):\n tiers[i].setEnabled(False)\n tiers[i].update()\n\n return tiers[0]",
"def tier_number(self, tier_number):\n\n self._tier_number = tier_number",
"def run_on_tier(self, tier, tierY=None):\n raise NotImplementedError",
"def post(self, tier):\n\n if self._from_cluster:\n raise exception.OperationNotPermitted\n\n try:\n tier = tier.as_dict()\n LOG.debug(\"storage tier post dict= %s\" % tier)\n\n new_tier = _create(self, tier)\n except exception.SysinvException as e:\n LOG.exception(e)\n raise wsme.exc.ClientSideError(_(\"Invalid data: failed to create \"\n \"a storage tier object\"))\n\n return StorageTier.convert_with_links(new_tier)",
"def tier(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"tier\")",
"def price_tier(self):\n return self._safe_value(VAR_PRICETIER, str)",
"def __init__(__self__, *,\n name: str,\n tier: str):\n pulumi.set(__self__, \"name\", name)\n pulumi.set(__self__, \"tier\", tier)",
"def get_pvp_tier(self, region, namespace, tier_id, **filters):\n filters['namespace'] = namespace\n resource = 'data/wow/pvp-tier/{0}'\n return self.get_resource(resource, region, *[tier_id], **filters)",
"def tier_explanation(self, tier_explanation):\n\n self._tier_explanation = tier_explanation",
"def tier_2160p(self, tier_2160p):\n\n self._tier_2160p = tier_2160p",
"def create(self, callback=None):\n\n parms = [{'budget': self.budget,\n 'deployment': {'deploymentId': self.deployment},\n 'description': self.description,\n 'name': self.name,\n 'minimumServers': self.minimum_servers,\n 'maximumServers': self.maximum_servers,\n 'breachIncrement': self.breach_increment,\n 'breachPeriodInMinutes': self.breach_period_in_minutes,\n 'cooldownPeriodInMinutes': self.cooldown_period_in_minutes,\n 'lowerCpuThreshold': self.lower_cpu_threshold,\n 'upperCpuThreshold': self.upper_cpu_threshold,\n 'lowerRamThreshold': self.lower_ram_threshold,\n 'upperRamThreshold': self.upper_ram_threshold}]\n\n payload = {'addTier':camel_keys(parms)}\n\n response=self.post(data=json.dumps(payload))\n if self.last_error is None:\n self.load()\n return response\n else:\n raise TierCreationException(self.last_error)"
] |
[
"0.69780785",
"0.68621033",
"0.65023184",
"0.6452587",
"0.6355082",
"0.6355082",
"0.6355082",
"0.62773496",
"0.62122846",
"0.6167871",
"0.6167871",
"0.6167871",
"0.6167871",
"0.6113269",
"0.6034399",
"0.60080045",
"0.598394",
"0.5843236",
"0.5833414",
"0.5829682",
"0.5780073",
"0.57668823",
"0.5703262",
"0.5683552",
"0.56024295",
"0.5565308",
"0.55319",
"0.5365069",
"0.53202933",
"0.52909493"
] |
0.7524803
|
0
|
Get an existing Environment resource's state with the given name, id, and optional extra properties used to qualify the lookup.
|
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Environment':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = EnvironmentArgs.__new__(EnvironmentArgs)
__props__.__dict__["application_name"] = None
__props__.__dict__["cname_prefix"] = None
__props__.__dict__["description"] = None
__props__.__dict__["endpoint_url"] = None
__props__.__dict__["environment_name"] = None
__props__.__dict__["operations_role"] = None
__props__.__dict__["option_settings"] = None
__props__.__dict__["platform_arn"] = None
__props__.__dict__["solution_stack_name"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["template_name"] = None
__props__.__dict__["tier"] = None
__props__.__dict__["version_label"] = None
return Environment(resource_name, opts=opts, __props__=__props__)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Environment':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = EnvironmentArgs.__new__(EnvironmentArgs)\n\n __props__.__dict__[\"arm_template_display_name\"] = None\n __props__.__dict__[\"created_by_user\"] = None\n __props__.__dict__[\"deployment_properties\"] = None\n __props__.__dict__[\"location\"] = None\n __props__.__dict__[\"name\"] = None\n __props__.__dict__[\"provisioning_state\"] = None\n __props__.__dict__[\"resource_group_id\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"type\"] = None\n __props__.__dict__[\"unique_identifier\"] = None\n return Environment(resource_name, opts=opts, __props__=__props__)",
"def get_state_by_id(state_id):\n my_state = storage.get('State', state_id)\n if my_state is None:\n abort(404)\n return jsonify(my_state.to_dict())",
"def get_state_by_id(state_id):\n for key, value in storage.all(\"State\").items():\n if state_id == value.id:\n return jsonify(value.to_dict())\n abort(404)",
"def state_by_id(state_id):\n state = storage.get(State, state_id)\n if state is None:\n abort(404)\n return jsonify(state.to_dict())",
"def a_state(id):\n state = storage.get(State, id)\n if state is not None:\n return jsonify(state.to_dict())\n abort(404)",
"def get_state_by_id(state_id):\n state = storage.get(State, state_id)\n if not state:\n abort(404)\n return jsonify(state.to_dict()), 200",
"def state_by_id(state_id):\n states_values = storage.all(\"State\").values()\n for obj in states_values:\n if obj.id == state_id:\n return jsonify(obj.to_dict())\n abort(404)",
"def get_state(state_id):\n try:\n ''' Check that state_id exists '''\n query = State.select().where(State.id == state_id)\n if not query.exists():\n raise LookupError('state_id')\n\n state = State.get(State.id == state_id)\n return state.to_dict(), 200\n except LookupError as e:\n abort(404)\n except Exception as e:\n abort(500)",
"def _get_env(cls, name: str) -> ApiEnvironment:\n envs = {e.name: e for e in cls._envs} # type: ignore\n if name not in envs:\n raise KeyError(f\"Invalid environment '{name}'. Choose from {list(envs.keys())}.\")\n return envs[name]",
"def get_one_state(state_id):\n state = storage.get('State', state_id)\n if state is None:\n abort(404)\n if request.method == 'DELETE':\n storage.delete(state)\n storage.save()\n return jsonify({}), 200\n elif request.method == 'PUT':\n try:\n res_dict = request.get_json()\n res_dict['id'] = state.id\n res_dict['created_at'] = state.created_at\n state.__init__(**res_dict)\n state.save()\n return jsonify(state.to_dict()), 200\n except:\n abort(400, description='Not a JSON')\n return jsonify(state.to_dict())",
"def get_state_by_name(exploration_id, state_name, strict=True):\n exploration = get_exploration_by_id(exploration_id)\n assert state_name\n\n # TODO(sll): This is too slow; improve it.\n state = None\n for candidate_state in exploration.states:\n if candidate_state.name == state_name:\n state = candidate_state\n break\n\n if strict and not state:\n raise Exception('State %s not found' % state_name)\n return state",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n force: Optional[pulumi.Input[bool]] = None,\n instance_id: Optional[pulumi.Input[str]] = None,\n state: Optional[pulumi.Input[str]] = None) -> 'InstanceState':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _InstanceStateState.__new__(_InstanceStateState)\n\n __props__.__dict__[\"force\"] = force\n __props__.__dict__[\"instance_id\"] = instance_id\n __props__.__dict__[\"state\"] = state\n return InstanceState(resource_name, opts=opts, __props__=__props__)",
"def get_state(state_id):\n try:\n state = jsonify(storage.get(State, state_id).to_dict())\n return state\n except:\n abort(404)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n arn: Optional[pulumi.Input[str]] = None,\n auth_mode: Optional[pulumi.Input[str]] = None,\n default_s3_location: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n engine_security_group_id: Optional[pulumi.Input[str]] = None,\n idp_auth_url: Optional[pulumi.Input[str]] = None,\n idp_relay_state_parameter_name: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n service_role: Optional[pulumi.Input[str]] = None,\n subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n url: Optional[pulumi.Input[str]] = None,\n user_role: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n workspace_security_group_id: Optional[pulumi.Input[str]] = None) -> 'Studio':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _StudioState.__new__(_StudioState)\n\n __props__.__dict__[\"arn\"] = arn\n __props__.__dict__[\"auth_mode\"] = auth_mode\n __props__.__dict__[\"default_s3_location\"] = default_s3_location\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"engine_security_group_id\"] = engine_security_group_id\n __props__.__dict__[\"idp_auth_url\"] = idp_auth_url\n __props__.__dict__[\"idp_relay_state_parameter_name\"] = idp_relay_state_parameter_name\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"service_role\"] = service_role\n __props__.__dict__[\"subnet_ids\"] = subnet_ids\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"tags_all\"] = tags_all\n __props__.__dict__[\"url\"] = url\n __props__.__dict__[\"user_role\"] = user_role\n __props__.__dict__[\"vpc_id\"] = vpc_id\n __props__.__dict__[\"workspace_security_group_id\"] = workspace_security_group_id\n return Studio(resource_name, opts=opts, __props__=__props__)",
"def get_state(state_id):\n state = storage.get(State, state_id)\n if state is None:\n abort(404)\n return jsonify(state.to_dict())",
"def get_state(state_id):\n state = storage.get(\"State\", state_id)\n if state:\n return jsonify(state.to_dict())\n abort(404)",
"def get_state_by_id(state_id):\r\n response = Response(json.dumps(json_error(ResponsesREST.INVALID_INPUT.value)),\r\n status=ResponsesREST.INVALID_INPUT.value, mimetype=\"application/json\")\r\n if validator_id.is_valid({\"id\": state_id}):\r\n state_get = State()\r\n state_get.id_state = state_id\r\n result = state_get.get_state()\r\n if result in (ResponsesREST.NOT_FOUND.value, ResponsesREST.SERVER_ERROR.value):\r\n response = Response(json.dumps(json_error(result)),\r\n status=result, mimetype=\"application/json\")\r\n else:\r\n response = Response(json.dumps(result.json_state()),\r\n status=ResponsesREST.SUCCESSFUL.value,\r\n mimetype=\"application/json\")\r\n return response",
"def state_id(state_id):\n state = storage.get(State, state_id)\n if state is None:\n abort(404)\n else:\n return jsonify(state.to_dict())",
"def a_states_id(state_id):\n i = storage.get(\"State\", state_id)\n if i:\n return jsonify(i.to_dict())\n else:\n return (jsonify({\"error\": \"Not found\"}), 404)",
"def statesById(state_id):\n obj = storage.get(State, state_id)\n if obj:\n return jsonify(obj.to_dict())\n return jsonify({\"error\": \"Not found\"}), 404",
"def get(self, request, state_id, format=None):\n try:\n state = State.objects.get(id=state_id)\n except ObjectDoesNotExist:\n raise NotFound(detail=\"State not found\")\n\n return Response(StateSerializer(state).data)",
"def get_state_by_id(exploration_id, state_id, strict=True):\n # TODO(sll): Generalize this to handle multiple state_ids at a time.\n state_memcache_key = _get_state_memcache_key(exploration_id, state_id)\n memcached_state = memcache_services.get_multi(\n [state_memcache_key]).get(state_memcache_key)\n\n if memcached_state is not None:\n return memcached_state\n else:\n state_model = exp_models.StateModel.get(\n exploration_id, state_id, strict=strict)\n if state_model:\n state = exp_domain.State.from_dict(state_id, state_model.value)\n memcache_services.set_multi({state_memcache_key: state})\n return state\n else:\n return None",
"def lookup(job_id: str) -> JobState:\n job = JobState(job_id)\n job.update()\n return job",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n asset_statuses: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ZoneAssetStatusArgs']]]]] = None,\n create_time: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n discovery_spec: Optional[pulumi.Input[pulumi.InputType['ZoneDiscoverySpecArgs']]] = None,\n display_name: Optional[pulumi.Input[str]] = None,\n labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n lake: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[str]] = None,\n resource_spec: Optional[pulumi.Input[pulumi.InputType['ZoneResourceSpecArgs']]] = None,\n state: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[str]] = None,\n uid: Optional[pulumi.Input[str]] = None,\n update_time: Optional[pulumi.Input[str]] = None) -> 'Zone':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ZoneState.__new__(_ZoneState)\n\n __props__.__dict__[\"asset_statuses\"] = asset_statuses\n __props__.__dict__[\"create_time\"] = create_time\n __props__.__dict__[\"description\"] = description\n __props__.__dict__[\"discovery_spec\"] = discovery_spec\n __props__.__dict__[\"display_name\"] = display_name\n __props__.__dict__[\"labels\"] = labels\n __props__.__dict__[\"lake\"] = lake\n __props__.__dict__[\"location\"] = location\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"project\"] = project\n __props__.__dict__[\"resource_spec\"] = resource_spec\n __props__.__dict__[\"state\"] = state\n __props__.__dict__[\"type\"] = type\n __props__.__dict__[\"uid\"] = uid\n __props__.__dict__[\"update_time\"] = update_time\n return Zone(resource_name, opts=opts, __props__=__props__)",
"def states_id(id=None):\n all_states = storage.all(State)\n foundstate = None\n for key, state in all_states.items():\n if state.id == id:\n foundstate = state\n break\n\n return render_template('9-states.html', States=all_states, ID=id,\n Stateobj=foundstate)",
"def view_state_id(state_id):\n states_obj = storage.all(\"State\")\n if request.method == 'GET':\n for state in states_obj.values():\n if state.id == state_id:\n id_found = state.to_dict()\n return jsonify(id_found)\n abort(404)\n\n if request.method == 'DELETE':\n for state in states_obj.values():\n if state.id == state_id:\n storage.delete(state)\n storage.save()\n return make_response(jsonify({}), 200)\n abort(404)\n\n if request.method == 'PUT':\n key = \"State.\" + state_id\n states = storage.all(\"State\")\n instance = states.get(key)\n if instance is None:\n abort(404)\n else:\n if not request.json:\n abort(400, \"Not a JSON\")\n req_var = request.get_json()\n for key, value in req_var.items():\n setattr(instance, key, value)\n storage.save()\n return make_response(jsonify(instance.to_dict()), 200)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n minimal_action: Optional[pulumi.Input[str]] = None,\n most_disruptive_allowed_action: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n preserved_state: Optional[pulumi.Input[pulumi.InputType['RegionPerInstanceConfigPreservedStateArgs']]] = None,\n project: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n region_instance_group_manager: Optional[pulumi.Input[str]] = None,\n remove_instance_state_on_destroy: Optional[pulumi.Input[bool]] = None) -> 'RegionPerInstanceConfig':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _RegionPerInstanceConfigState.__new__(_RegionPerInstanceConfigState)\n\n __props__.__dict__[\"minimal_action\"] = minimal_action\n __props__.__dict__[\"most_disruptive_allowed_action\"] = most_disruptive_allowed_action\n __props__.__dict__[\"name\"] = name\n __props__.__dict__[\"preserved_state\"] = preserved_state\n __props__.__dict__[\"project\"] = project\n __props__.__dict__[\"region\"] = region\n __props__.__dict__[\"region_instance_group_manager\"] = region_instance_group_manager\n __props__.__dict__[\"remove_instance_state_on_destroy\"] = remove_instance_state_on_destroy\n return RegionPerInstanceConfig(resource_name, opts=opts, __props__=__props__)",
"def state_by_id(id):\n states = storage.all('State').values()\n for state in states:\n if state.id == id:\n return render_template('9-states.html', states=state)\n return render_template('9-states.html')",
"def given_state(id):\n key = 'State.{}'.format(id)\n state = storage.all(State).get(key)\n return render_template('9-states.html', states=state)",
"def states_by_id(id):\n list_states = storage.all('State')\n state_id = 'State.{}'.format(id)\n if state_id in list_states:\n list_states = list_states[state_id]\n else:\n list_states = None\n return render_template('9-states.html', list_states=list_states)"
] |
[
"0.6518575",
"0.61676663",
"0.615199",
"0.6109608",
"0.6109441",
"0.60900664",
"0.6072669",
"0.5931966",
"0.58831435",
"0.5837981",
"0.5818557",
"0.58057237",
"0.5786659",
"0.5786354",
"0.5778798",
"0.5712893",
"0.56756985",
"0.5640747",
"0.5597875",
"0.5522777",
"0.54472834",
"0.5421105",
"0.53973055",
"0.53666955",
"0.5356236",
"0.53431284",
"0.5337101",
"0.5330727",
"0.53002524",
"0.5264817"
] |
0.6545722
|
0
|
The Amazon Resource Name (ARN) of the custom platform to use with the environment.
|
def platform_arn(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "platform_arn")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def platform_arn(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"platform_arn\")",
"def platform(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"platform\")",
"def platform():\n return \"micaz\"",
"def PlatformName():\n if override_platform_name:\n return override_platform_name\n if IsWindows():\n return 'win32'\n if IsLinux():\n return 'linux'\n if IsMac():\n return 'mac'\n raise NotImplementedError('Unknown platform \"%s\".' % sys.platform)",
"def product(self):\n return self.appName",
"def name(cls):\n\n system = platform.system()\n\n # Apply system map\n if system in NAME_MAP:\n system = NAME_MAP[system]\n\n return system",
"def GetOSName():\n return Config.osName_",
"def environment_label(self) -> str:\n return self._environment_label",
"def platform_config_filename(region, account_prefix, prod):\n return 'infra/platform-config/%s/%s/%s.json' % (\n account_prefix, \"prod\" if prod else \"dev\", region\n )",
"def GetPlatform(self):\n arch = \"None\"\n # check architecture name\n if \"CMTCONFIG\" in os.environ:\n arch = os.environ[\"CMTCONFIG\"]\n elif \"SCRAM_ARCH\" in os.environ:\n arch = os.environ[\"SCRAM_ARCH\"]\n return arch",
"def brand(self):\n return \"Nest Labs\"",
"def name(self) -> str:\n return f\"{self.platform_name} {self._sensor_name}\"",
"def platform(self):\n return self.random.choice([\n 'Laptop', \n 'Desktop', \n 'Workstation', \n 'Server', \n 'Virtual Machine', \n 'Container', \n 'Micro-Service', \n 'Droplet', \n 'SaaS'\n ])",
"def application_arn(self) -> Optional[str]:\n return pulumi.get(self, \"application_arn\")",
"def app_image_config_arn(self) -> Optional[str]:\n return pulumi.get(self, \"app_image_config_arn\")",
"def os_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"os_name\")",
"def platform_num(self) -> str:\n return pulumi.get(self, \"platform_num\")",
"def architecture(self) -> str:\n return pulumi.get(self, \"architecture\")",
"def architecture(self) -> str:\n return pulumi.get(self, \"architecture\")",
"def application_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_name\")",
"def application_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_name\")",
"def platform(self):\n # type: () -> string_types\n return self._platform",
"def architecture_name(self):\n return get_architecture_name(self.architecture)",
"def get_os() -> str:\n system = platform.system().lower()\n\n if system == \"linux\":\n machine = os.uname().machine\n if machine.startswith(\"arm\") or machine.startswith(\"aarch\"):\n system = \"pi\"\n\n return system + \"_\" + platform.architecture()[0]",
"def get_name():\n return config.APP_NAME",
"def platform(self, return_str=True):\n architecture = self.arch(\"docker\")\n host_platform = self.osversion() + \"/\" + architecture\n if return_str:\n return host_platform.lower()\n return self.parse_platform(host_platform)",
"def get_os_name(cls):\n return cls.get_os_type().name",
"def application_name(self) -> Optional[str]:\n return pulumi.get(self, \"application_name\")",
"def environment_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"environment_name\")",
"def arn(self) -> str:\n return pulumi.get(self, \"arn\")"
] |
[
"0.7735306",
"0.66278744",
"0.6624211",
"0.6606932",
"0.6323558",
"0.6283941",
"0.6234539",
"0.61513674",
"0.6081606",
"0.60462385",
"0.60026896",
"0.5988347",
"0.598575",
"0.59628206",
"0.5962734",
"0.59611076",
"0.59324396",
"0.59225625",
"0.59225625",
"0.5922316",
"0.5922316",
"0.59092885",
"0.58717483",
"0.5853615",
"0.5838145",
"0.58310544",
"0.5810798",
"0.577213",
"0.5770021",
"0.5745066"
] |
0.7835914
|
0
|
The name of an Elastic Beanstalk solution stack (platform version) to use with the environment.
|
def solution_stack_name(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "solution_stack_name")
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def solution_stack_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"solution_stack_name\")",
"def stackname(self):\n return self.BASE_NAME.format(**self.conf)",
"def stack_name(self) -> str:\n return jsii.get(self, \"stackName\")",
"def env_name(self):\n return f\"{self.project_name}-{self.stage}\"",
"def stack_name(self):\n stack_name = getattr(self, '__stack_name', None)\n if (\n self.args.stack_name and\n not stack_name\n ):\n stack_name = self.args.stack_name\n elif not stack_name:\n stack_name = \"nephoria-stack-\" + str(int(time.time()))\n\n setattr(self, '__stack_name', stack_name)\n return stack_name",
"def name(self):\n return self._env_name",
"def kernel_name():\n return \"python3\"",
"def stack_name(self) -> str:\n return self._values.get(\"stack_name\")",
"def name_tag(resource_name):\n return Join(\"\", [Ref('AWS::StackName'), '-', resource_name])",
"def name(self):\n return get_env_name(self.tool_name,\n self._python,\n self._requirements,\n self._tagged_env_vars)",
"def environment_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"environment_name\")",
"def get_name():\n return config.APP_NAME",
"def pipeline_stack_name(self) -> str:\n return self._values.get(\"pipeline_stack_name\")",
"def environment_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"environment_name\")",
"def environment_label(self) -> str:\n return self._environment_label",
"def stack_name(self) -> typing.Optional[str]:\n return self._values.get(\"stack_name\")",
"def get_egg_name():\n global eggname\n if not eggname:\n version = local('git describe --abbrev=4', capture=True)\n if version:\n version = '%s-%s' % (version, datetime.datetime.today().strftime('%Y%m%d'))\n eggname = APP_NAME + '-%s-py%s.egg' % (version.replace('-', '_'), python_version)\n return eggname",
"def stackname(self):\n raise NotImplementedError",
"def env_name(pre_chars='(', post_chars=')'):\n env_path = builtins.__xonsh_env__.get('VIRTUAL_ENV', '')\n if len(env_path) == 0 and xp.ON_ANACONDA:\n env_path = builtins.__xonsh_env__.get('CONDA_DEFAULT_ENV', '')\n env_name = os.path.basename(env_path)\n if env_name:\n return pre_chars + env_name + post_chars",
"def python_name(self):\n return self.requirement.name",
"def docker_image_tag(self, app):\n return f\"briefcase/{app.bundle}.{app.app_name.lower()}:{app.target_vendor}-{app.target_codename}\"",
"def get_soc_name():\n return get_soc_spec(\"SOC_VERSION\")",
"def _get_deployment_flavor():\n flavor = cfg.CONF.paste_deploy.flavor\n return '' if not flavor else ('-' + flavor)",
"def get_launch_name():\n\n if product_type == \"RHEL7\":\n launch_name = \"Errata-{0}_{1}_{2}_{3}_{4}_{5}CDN\".format(errata_id, product_type, variant, arch, test_level, cdn)\n \n elif product_type == \"RHEL8\":\n launch_name = \"Errata-{0}_{1}_{2}_{3}_{4}CDN\".format(errata_id, product_type, arch, test_level, cdn)\n\n return launch_name",
"def container_image_name(registry, component_name, version):\n if version is None:\n image = component_name + ':dev'\n else:\n image = '%s/%s:%s' % (registry, component_name, version)\n\n return image",
"def generate_cluster_stack_name(job):\n return 'cluster-%s----%s' % (job.compute_resource.id, job.id)",
"def stage_name(self) -> str:\n return pulumi.get(self, \"stage_name\")",
"def stage_name(self) -> str:\n return pulumi.get(self, \"stage_name\")",
"def version_name(self) -> str:\n return pulumi.get(self, \"version_name\")",
"def bucket_dual_stack_domain_name(self) -> str:\n return jsii.get(self, \"bucketDualStackDomainName\")"
] |
[
"0.6874254",
"0.68344885",
"0.6518337",
"0.65035015",
"0.6400849",
"0.62462306",
"0.61668295",
"0.6154686",
"0.6143118",
"0.6137554",
"0.61008966",
"0.6086461",
"0.60863656",
"0.60621977",
"0.60092354",
"0.5980697",
"0.5922324",
"0.5898759",
"0.5881433",
"0.58511484",
"0.582938",
"0.5816132",
"0.58146447",
"0.5811952",
"0.57712144",
"0.5765751",
"0.57646734",
"0.57646734",
"0.5764073",
"0.5694881"
] |
0.6848552
|
1
|
Get the global IP address
|
def get_global_ip():
network_info_providers = [
'http://api.ipify.org/',
'http://myip.dnsomatic.com',
'http://inet-ip.info/ip',
'http://v4.ident.me/',
]
random.shuffle(network_info_providers)
for url in network_info_providers:
try:
return requests.get(url).text.lstrip().rstrip()
except Exception:
continue
else:
log.info('cannot find global ip')
return ""
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_global_ip() -> str:\n return urllib.request.urlopen(\"https://icanhazip.com\").read().decode().strip()",
"def get_ip(self):",
"def get_host_ip_addr():\n return nova_conf.my_ip",
"def get_local_host_ip(self) -> str:",
"def get_IP(): \n \n return socket.gethostbyname(socket.gethostname())",
"def get_IPaddress():\n config = get_ifconfig()\n return config[0]",
"def get_ip():\n return os.getenv(\"HOST_IP\", \"127.0.0.1\")",
"def get_IP():\n\n return socket.gethostbyname(socket.gethostname())",
"def get_ip():\n return request.environ['HTTP_REMOTE_ADDR']",
"def get_my_ip():\r\n try:\r\n return [x[4] for x in conf.route.routes if x[2] != '0.0.0.0'][0]\r\n except IndexError:\r\n return '127.0.0.1'",
"def getLocalIpAddress() :\n \n if (platform.system() == 'Linux') :\n cmd = \"ifconfig wlan0 | grep 'inet addr:' | cut -d: -f2 | awk '{print $1}'\"\n return subprocess.check_output(cmd, shell=True) \n else : # Darwin\n return socket.gethostbyname(socket.gethostname())",
"def ip():\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 80))\n ip = s.getsockname()[0]\n s.close()\n return ip",
"def _get_my_ip():\n try:\n csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n csock.connect(('8.8.8.8', 80))\n (addr, port) = csock.getsockname()\n csock.close()\n return addr\n except socket.error:\n return \"127.0.0.1\"",
"def get_local_ip():\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.connect((\"8.8.8.8\", 80))\n local_ip = sock.getsockname()[0]\n sock.close()\n\n return local_ip",
"def get_ip_string():\n return netifaces.ifaddresses('br0')[netifaces.AF_INET][0]['addr']",
"def get_ip_address(self):\n raise NotImplementedError",
"def get_local_ip():\n\n return os.environ[LOCAL_IP_KEY]",
"def address(self):\n \n return self.__ip",
"def getLocalIP():\r\n try:\r\n csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n csock.connect(('8.8.8.8', 80))\r\n (addr, port) = csock.getsockname()\r\n csock.close()\r\n return addr\r\n except socket.error:\r\n return \"127.0.0.1\"",
"def get_ip():\n with hide(\"everything\"):\n ip_addresses = run('hostname -I').split(' ')\n return ip_addresses[0]",
"def getLocalhostIP():\n return socket.getaddrinfo('localhost', 0)[0][4][0]",
"def get_ip_address(self):\n return self.adb.get_ip_address()",
"def ip(self):\n return os.environ.get('REMOTE_ADDR')",
"def ipAddress():\n \n sk = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sk.connect((\"8.8.8.8\", 80))\n ip = (sk.getsockname()[0])\n sk.close()\n return str(ip)",
"def ip_address(self) -> str:\n return pulumi.get(self, \"ip_address\")",
"def ip(self) -> str:\n return pulumi.get(self, \"ip\")",
"def get_ip_address():\n try:\n return socket.gethostbyname(socket.getfqdn())\n except socket.gaierror as error:\n logger.warn(error)\n return socket.gethostbyname(\"\")",
"def get_ip_address(self):\n return self.__ip_address",
"def get_ip():\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n # doesn't even have to be reachable\n s.connect(('10.255.255.255', 1))\n ip = s.getsockname()[0]\n except Exception:\n ip = '127.0.0.1'\n finally:\n s.close()\n return ip",
"def getIp(self):\n raise NotImplementedError"
] |
[
"0.8177861",
"0.78713524",
"0.7836615",
"0.7808784",
"0.7742288",
"0.7727455",
"0.7672631",
"0.76301765",
"0.7592437",
"0.7556169",
"0.7501851",
"0.749981",
"0.74838996",
"0.7455439",
"0.7448763",
"0.7436739",
"0.74339193",
"0.74294287",
"0.7428558",
"0.73813033",
"0.73690134",
"0.73610294",
"0.7331656",
"0.73195267",
"0.730993",
"0.73076195",
"0.73062843",
"0.7304257",
"0.7274842",
"0.7255138"
] |
0.82571846
|
0
|
Get the global IPv6 address
|
def get_global_ip_ipv6():
network_info_providers = [
'http://v6.ipv6-test.com/api/myip.php',
'http://v6.ident.me/',
]
random.shuffle(network_info_providers)
for url in network_info_providers:
try:
return requests.get(url).text.lstrip().rstrip()
except Exception:
continue
else:
log.info('cannot find global ipv6 ip')
return ""
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_main_ipv6():\n try:\n # No data is actually transmitted (UDP)\n s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)\n s.connect( ('2001:4860:4860::8888', 53) )\n real_ip = s.getsockname()[0]\n s.close()\n return real_ip\n except socket.error as e:\n logging.error(\"Cannot retrieve current IPv6 address: %s\" % e)\n return None",
"def GlobalIpv6Address(self):\n if self.force_auto_sync:\n self.get('GlobalIpv6Address')\n return self._GlobalIpv6Address",
"def ipv6_address(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ipv6_address\")",
"def find_ipv6():\n\n test_host = '2600::' # Sprint.net\n try:\n with socket.socket(socket.AF_INET6, socket.SOCK_DGRAM) as s:\n s.connect((test_host, 53))\n ipv6 = s.getsockname()[0]\n except:\n if cfg['debug']:\n print(\"Couldn't create a socket to %s\" % test_host)\n print(\"Check that you have a valid IPv6 default route\")\n ipv6 = None\n\n return ipv6",
"def get_if_addr6(iff):\n return next((x[0] for x in in6_getifaddr()\n if x[2] == iff and x[1] == IPV6_ADDR_GLOBAL), None)",
"def ipv6_address(self) -> pulumi.Output[str]:\n warnings.warn(\"\"\"use `ipv6_addresses` attribute instead\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"ipv6_address is deprecated: use `ipv6_addresses` attribute instead\"\"\")\n\n return pulumi.get(self, \"ipv6_address\")",
"def ipv6_address(self) -> Optional[pulumi.Input[str]]:\n warnings.warn(\"\"\"use `ipv6_addresses` attribute instead\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"ipv6_address is deprecated: use `ipv6_addresses` attribute instead\"\"\")\n\n return pulumi.get(self, \"ipv6_address\")",
"def LinkLocalIpv6Address(self):\n if self.force_auto_sync:\n self.get('LinkLocalIpv6Address')\n return self._LinkLocalIpv6Address",
"def ipv6_address(self) -> str:\n warnings.warn(\"\"\"The IPv6 address assigned to the instance. (Deprecated) This property was applicable only to First Generation instances.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"ipv6_address is deprecated: The IPv6 address assigned to the instance. (Deprecated) This property was applicable only to First Generation instances.\"\"\")\n\n return pulumi.get(self, \"ipv6_address\")",
"def PrefixIpv6Address(self):\n if self.force_auto_sync:\n self.get('PrefixIpv6Address')\n return self._PrefixIpv6Address",
"def ipv6_address(self) -> Optional[pulumi.Input[str]]:\n warnings.warn(\"\"\"The IPv6 address assigned to the instance. (Deprecated) This property was applicable only to First Generation instances.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"ipv6_address is deprecated: The IPv6 address assigned to the instance. (Deprecated) This property was applicable only to First Generation instances.\"\"\")\n\n return pulumi.get(self, \"ipv6_address\")",
"def ipv6_address_space(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"ipv6_address_space\")",
"def get_intf_address(self, intf, pod, v6=False):\n if v6:\n cmd = [\"ifconfig \" + intf + \" | grep Global\"]\n output = pod.run_cmd_on_vm(cmd)\n ip6 = re.search(\n r'inet6\\s+addr\\s*:\\s*(\\S*)',\n output['ifconfig eth0 | grep Global'])\n ip6_addr = ip6.group(1)\n return ip6_addr\n cmd = [\"ifconfig \" + intf + \" | grep inet\"]\n output = pod.run_cmd_on_vm(cmd)\n ip = re.search(\n r'inet\\s+addr\\s*:\\s*(\\d+.\\d+.\\d+.\\d+)',\n output['ifconfig eth0 | grep inet'])\n ip_addr = ip.group(1)\n return ip_addr",
"def ipv6_address(self) -> pulumi.Output[str]:\n warnings.warn(\"\"\"The IPv6 address assigned to the instance. (Deprecated) This property was applicable only to First Generation instances.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"ipv6_address is deprecated: The IPv6 address assigned to the instance. (Deprecated) This property was applicable only to First Generation instances.\"\"\")\n\n return pulumi.get(self, \"ipv6_address\")",
"def local_address(self) -> T_SockAddr:\n from anyio._core._sockets import convert_ipv6_sockaddr\n return convert_ipv6_sockaddr(self.raw_socket.getsockname())",
"def ipv6_address_space(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ipv6_address_space\")",
"def _get_virtual_oper_VipV6_address(self):\n return self.__virtual_oper_VipV6_address",
"def get_if_raw_addr6(iff):\n ip6 = get_if_addr6(iff)\n if ip6 is not None:\n return inet_pton(socket.AF_INET6, ip6)\n\n return None",
"def ipv6_addresses(self) -> Dict[str, List[IPv6Address]]:\n log.debug(\"Host %s: ipv6 addresses of the devices interfaces %s.\", self.host, self._get_ipv6_addresses(\"self\"))\n return self._get_ipv6_addresses(\"self\")",
"def toV6(self):\n return V6Address.fromV4(self)",
"def get_global_ip() -> str:\n return urllib.request.urlopen(\"https://icanhazip.com\").read().decode().strip()",
"def get_IPaddress():\n config = get_ifconfig()\n return config[0]",
"def get_ip_address(ifname, family=socket.AF_INET):\n if family == socket.AF_INET:\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n ip = socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, # SIOCGIFADDR\n struct.pack('256s', ifname[:15]))[20:24])\n except IOError:\n return None\n return ip\n elif family == socket.AF_INET6:\n try:\n with open(\"/proc/net/if_inet6\", \"r\") as f:\n if6lines = f.readlines()\n for line in if6lines:\n val = line.split()\n # filter LINKLOCAL address\n if val[3] != '20' and val[-1] == str(ifname):\n return Convert.format_proc_address(val[0])\n return None\n except Exception as e:\n SysTools.logger.error(\"can not get the ipv6 address of %s : %s\", str(ifname), str(e))\n return None\n else:\n return None",
"def ipv6_addresses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"ipv6_addresses\")",
"def remote_address(self) -> T_SockAddr:\n from anyio._core._sockets import convert_ipv6_sockaddr\n return convert_ipv6_sockaddr(self.raw_socket.getpeername())",
"def get_global_ip():\n network_info_providers = [\n 'http://api.ipify.org/',\n 'http://myip.dnsomatic.com',\n 'http://inet-ip.info/ip',\n 'http://v4.ident.me/',\n ]\n random.shuffle(network_info_providers)\n for url in network_info_providers:\n try:\n return requests.get(url).text.lstrip().rstrip()\n except Exception:\n continue\n else:\n log.info('cannot find global ip')\n return \"\"",
"def get_ipv6_list():\n ipv6 = __grains__.get(\"ipv6\")\n\n return \" \".join([\"[\" + ip + \"]\" for ip in ipv6])",
"def get_ip_string():\n return netifaces.ifaddresses('br0')[netifaces.AF_INET][0]['addr']",
"def ipv6_addresses(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"ipv6_addresses\")",
"def local_ipv6_network_cidr(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"local_ipv6_network_cidr\")"
] |
[
"0.8276269",
"0.82484275",
"0.7695294",
"0.7630343",
"0.7439502",
"0.7352564",
"0.7306042",
"0.7292912",
"0.72007406",
"0.7183485",
"0.71534765",
"0.71408135",
"0.7075447",
"0.7072936",
"0.7067728",
"0.700647",
"0.6943251",
"0.6901189",
"0.68371254",
"0.68331796",
"0.6820401",
"0.6760036",
"0.6730715",
"0.6719501",
"0.67041224",
"0.6695512",
"0.6682327",
"0.6625417",
"0.6603928",
"0.66029817"
] |
0.8327801
|
0
|
Implementation of a binary operator between DataFrames on different indices. A new DataFrame representing an in-memory DolphinDB table is returned. It is guaranteed that both DataFrames have no where_expr.
|
def _binary_op_on_different_indices(self, other, func, axis): # TODO: add axis check
def merge_columns(self_columns, other_columns):
"""
Align the input columns, filling the missing columns with None
--------
Examples
--------
>>> merge_columns(
... ["a", "b", "ba", "d", "f"],
... ["e", "c", "d", "g", "ga", "a"]
... )
(('a','a'),('b',None),('ba',None),(None,'c'),('d','d'),(None,'e'),('f',None),(None,'g'),(None,'ga'))
"""
sorted_self_columns, sorted_other_columns = sorted(self_columns), sorted(other_columns)
self_idx = other_idx = 0
self_len, other_len = len(self_columns), len(other_columns)
while self_idx < self_len and other_idx < other_len:
curr_self_column, curr_other_column = sorted_self_columns[self_idx], sorted_other_columns[other_idx]
if curr_self_column == curr_other_column:
yield curr_self_column, curr_other_column
self_idx += 1
other_idx += 1
elif curr_self_column < curr_other_column:
yield curr_self_column, None
self_idx += 1
else:
yield None, curr_other_column
other_idx += 1
while self_idx < self_len:
yield sorted_self_columns[self_idx], None
self_idx += 1
while other_idx < other_len:
yield None, sorted_other_columns[other_idx]
other_idx += 1
assert isinstance(self, _Frame)
assert isinstance(other, _Frame)
if ((not self._in_memory and len(self._index_columns) == 0)
or (not other._in_memory and len(other._index_columns) == 0)):
raise ValueError("Frame has no default index if it is not in memory")
session = self._session
self_var_name, other_var_name = self._var_name, other._var_name
if other._is_dataframe_like:
self_data_columns = self._data_columns
other_data_columns = other._data_columns
index_list, from_clause = _generate_joiner(
self_var_name, other_var_name, self._index_columns, other._index_columns)
if self_data_columns == other_data_columns:
select_list = (f"{func}({self_var_name}.{c}, {other_var_name}.{c}) as {c}"
for c in self_data_columns)
data_columns = self_data_columns
else:
merged_columns = list(merge_columns(self_data_columns, other_data_columns))
select_list = (f"00f as {s if o is None else o}" if s is None or o is None
else f"{func}({self_var_name}.{s}, {other_var_name}.{s}) as {s}"
for s, o in merged_columns)
data_columns = [s if o is None else o for s, o in merged_columns]
select_list = itertools.chain(index_list, select_list)
script = sql_select(select_list, from_clause)
elif other._is_series_like:
self_data_columns = self._data_columns
other_data_column = other._data_columns[0]
index_list, from_clause = _generate_joiner(
self._var_name, other._var_name, self._index_columns, other._index_columns)
select_list = (f"{func}({self_var_name}.{c}, {other_var_name}.{other_data_column}) as {c}"
for c in self_data_columns)
data_columns = self_data_columns
select_list = itertools.chain(index_list, select_list)
script = sql_select(select_list, from_clause)
return self._get_from_script(
session, script, data_columns=data_columns, index_map=self._index_map, index=self._index)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def where(self, cond, other, **kwargs): # noqa: PR02\n return DataFrameDefault.register(pandas.DataFrame.where)(\n self, cond=cond, other=other, **kwargs\n )",
"def union_all(x: DataFrame, y: DataFrame) -> DataFrame:\n _check_xy(x, y)\n return bind_rows(x, y, __calling_env=CallingEnvs.REGULAR)",
"def compare(self, other, align_axis, keep_shape, keep_equal, result_names):\n return DataFrameDefault.register(pandas.DataFrame.compare)(\n self,\n other=other,\n align_axis=align_axis,\n keep_shape=keep_shape,\n keep_equal=keep_equal,\n result_names=result_names,\n )",
"def _copy_experimental_conditions_to_second_df(self, df1, df1_cols, df2, df2_cols):\n _cols_ = np.array([df1_cols, df2_cols])\n has_cols = _cols_ != set([])\n exp_cols = _cols_[has_cols]\n if len(exp_cols) == 1: # only one DataFrame has additional columns\n _dfs_ = [df1, df2]\n exp_cols = list(exp_cols[0])\n df_with_cols, df_without_cols = _dfs_[list(has_cols).index(True)], _dfs_[list(has_cols).index(False)]\n exp_cols_only_df = df_with_cols[exp_cols].drop_duplicates()\n num_unique_exp_rows = len(exp_cols_only_df)\n len_df_without_cols = len(df_without_cols)\n\n try:\n expanded_df_without_cols = pd.concat([df_without_cols] * num_unique_exp_rows, ignore_index=True)\n expanded_df_without_cols[exp_cols] = pd.DataFrame(np.repeat(\n exp_cols_only_df.values, len_df_without_cols, axis=0),\n columns=exp_cols)\n return tuple([(expanded_df_without_cols, df_with_cols)[i] for i in _cols_ != set([])]\n + [set(exp_cols), exp_cols_only_df])\n\n except ValueError: # breaks when df_with_out_columns is of len 0.\n return tuple([(pd.DataFrame(columns=list(set(exp_cols)|set(df_without_cols.columns))), df_with_cols)[i]\n for i in _cols_ != set([])] + [set(exp_cols), exp_cols_only_df])\n else:\n return self._combine_experimental_conditions(df1, df1_cols, df2, df2_cols)",
"def intersection(self, other, mode: str = \"outer\"):\n # TODO options for which extra fields to keep\n # by default, keep just the fields in 'table'\n if mode == \"trim\":\n # Slower\n chunks = [\n chunk.data\n for _, chunk in self.by_ranges(other, mode=mode, keep_empty=False)\n ]\n return self.as_dataframe(pd.concat(chunks))\n # Faster\n slices = iter_slices(self.data, other.data, mode, False)\n indices = np.concatenate(list(slices))\n return self.as_dataframe(self.data.loc[indices])",
"def __or__(self, other):\n tmp = self.rows[:]\n tmp.extend(other.rows[:]) # use copys of lists !\n return Table(tmp)",
"def __eq__(self, other: Any) -> ColumnOperators: # type: ignore[override]\n return self.operate(eq, other)",
"def __and__(self, other):\n tmp = [ r for r in self.rows if r in other.rows ]\n return Table(tmp)",
"def dataframe_diff(xxa,xxb):\n\n xa=pd.DataFrame(xxa)\n xb=pd.DataFrame(xxb)\n merged = xa.merge(xb, indicator=True, how='outer')\n\n diff=merged[merged['_merge'] != 'both']\n\n return diff",
"def df_update(self, other, **kwargs): # noqa: PR02\n return BinaryDefault.register(pandas.DataFrame.update, inplace=True)(\n self, other=other, **kwargs\n )",
"def combine(self, other, **kwargs): # noqa: PR02\n return BinaryDefault.register(pandas.DataFrame.combine)(\n self, other=other, **kwargs\n )",
"def __sub__(self, other: Any) -> ColumnOperators:\n return self.operate(sub, other)",
"def _inherit_binary_operation(self, other, op):\n sdata = self.data\n if isinstance(op, basestring) and hasattr(sdata, op):\n bound_op = getattr(sdata, op)\n else:\n def bound_op(odata):\n return op(sdata, odata)\n\n bset = self.bset\n if isinstance(other, type(self)) or isinstance(self, type(other)):\n obset = other.bset\n if not ((bset == obset) or\n bset.shape == () or\n obset.shape == ()):\n raise ValueError(\"instances of {} must be defined over \"\n \"instances of {} that compare equal for \"\n \"binary operations to be defined\"\n .format(self.__class__.__name__,\n bset.__class__.__name__))\n new_data = bound_op(other.data)\n if bset.shape == ():\n bset = obset\n else:\n new_data = bound_op(other)\n\n return type(self)(new_data, bset)",
"def mask(self, cond, other, **kwargs): # noqa: PR01\n return DataFrameDefault.register(pandas.DataFrame.mask)(\n self, cond, other, **kwargs\n )",
"def outer_join(self, table: Union[str, sa.Table], left_where: Union[str, sa.Column, BinaryExpression], right_where: Union[str, sa.Column] = None, alias: str = None) -> B[B, E]:",
"def _(x: DataFrame, y: DataFrame) -> DataFrame:\n _check_xy(x, y)\n indicator = \"__datar_setdiff__\"\n out = pandas.merge(x, y, how=\"left\", indicator=indicator)\n\n from .distinct import distinct\n\n return distinct(\n out[out[indicator] == \"left_only\"]\n .drop(columns=[indicator])\n .reset_index(drop=True),\n __calling_env=CallingEnvs.REGULAR,\n )",
"def dataframe_crossjoin(df1, df2, **kwargs):\n df1['_tmpkey'] = 1\n df2['_tmpkey'] = 1\n\n res = pd.merge(df1, df2, on='_tmpkey', **kwargs).drop('_tmpkey', axis=1)\n res.index = pd.MultiIndex.from_product((df1.index, df2.index))\n\n df1.drop('_tmpkey', axis=1, inplace=True)\n df2.drop('_tmpkey', axis=1, inplace=True)\n\n return res",
"def test_arithmetic_operations() -> None:\n\n # one two\n # 0 1\n # 2 3\n # 4 5\n df = pd.DataFrame(np.arange(6).reshape((3, 2)), columns=[\"one\", \"two\"])\n\n series = df.iloc[0] # first row == (0, 1)\n\n assert series.index.values.tolist() == [\"one\", \"two\"]\n assert series.values.tolist() == [0, 1]\n\n # Arithmetic operations between frames and series match the index of the\n # series (column names) on the columns of the frame, broadcasting over the\n # rows by default.\n\n df2 = df.sub(series) # axis=1\n\n # one two\n # 0 0\n # 2 2\n # 4 4\n assert df2.values.flatten().tolist() == [0, 0, 2, 2, 4, 4]\n\n # If you want to match on rows, use axis=0. This will match the index of the\n # series (row indices) on the rows of the frame, broadcasting over the\n # columns by default.\n series = df.loc[:, \"one\"]\n\n df2 = df.sub(series, axis=0)\n # one two\n # 0 1\n # 0 1\n # 0 1\n assert df2.values.flatten().tolist() == [0, 1, 0, 1, 0, 1]",
"def join_where(self, table, one, operator, two, type='inner'):\n return self.join(table, one, operator, two, type, True)",
"def cross(df1, df2, **kwargs):\r\n df1['_tmpkey'] = 1\r\n df2['_tmpkey'] = 1\r\n\r\n res = pd.merge(df1, df2, on='_tmpkey', **kwargs).drop('_tmpkey', axis=1)\r\n res.index = pd.MultiIndex.from_product((df1.index, df2.index))\r\n\r\n df1.drop('_tmpkey', axis=1, inplace=True)\r\n df2.drop('_tmpkey', axis=1, inplace=True)\r\n\r\n return res",
"def join(self, right, **kwargs): # noqa: PR02\n return DataFrameDefault.register(pandas.DataFrame.join)(self, right, **kwargs)",
"def cross(df1, df2, **kwargs):\r\n df1['_tmpkey'] = 1\r\n df2['_tmpkey'] = 1\r\n\r\n res = pd.merge(df1, df2, on='_tmpkey', **kwargs).drop('_tmpkey', axis=1)\r\n res.index = pd.MultiIndex.from_product((df1.index, df2.index))\r\n\r\n df1.drop('_tmpkey', axis=1, inplace=True)\r\n df2.drop('_tmpkey', axis=1, inplace=True)\r\n return res",
"def run_and_compare(\n fn,\n data,\n data2=None,\n force_lazy=True,\n force_hdk_execute=False,\n force_arrow_execute=False,\n allow_subqueries=False,\n comparator=df_equals,\n **kwargs,\n):\n\n def run_modin(\n fn,\n data,\n data2,\n force_lazy,\n force_hdk_execute,\n force_arrow_execute,\n allow_subqueries,\n constructor_kwargs,\n **kwargs,\n ):\n kwargs[\"df1\"] = pd.DataFrame(data, **constructor_kwargs)\n kwargs[\"df2\"] = pd.DataFrame(data2, **constructor_kwargs)\n kwargs[\"df\"] = kwargs[\"df1\"]\n\n if force_hdk_execute:\n set_execution_mode(kwargs[\"df1\"], \"hdk\")\n set_execution_mode(kwargs[\"df2\"], \"hdk\")\n elif force_arrow_execute:\n set_execution_mode(kwargs[\"df1\"], \"arrow\")\n set_execution_mode(kwargs[\"df2\"], \"arrow\")\n elif force_lazy:\n set_execution_mode(kwargs[\"df1\"], \"lazy\")\n set_execution_mode(kwargs[\"df2\"], \"lazy\")\n\n exp_res = fn(lib=pd, **kwargs)\n\n if force_hdk_execute:\n set_execution_mode(exp_res, \"hdk\", allow_subqueries)\n elif force_arrow_execute:\n set_execution_mode(exp_res, \"arrow\", allow_subqueries)\n elif force_lazy:\n set_execution_mode(exp_res, None, allow_subqueries)\n\n return exp_res\n\n constructor_kwargs = kwargs.pop(\"constructor_kwargs\", {})\n try:\n kwargs[\"df1\"] = pandas.DataFrame(data, **constructor_kwargs)\n kwargs[\"df2\"] = pandas.DataFrame(data2, **constructor_kwargs)\n kwargs[\"df\"] = kwargs[\"df1\"]\n ref_res = fn(lib=pandas, **kwargs)\n except Exception as err:\n with pytest.raises(type(err)):\n exp_res = run_modin(\n fn=fn,\n data=data,\n data2=data2,\n force_lazy=force_lazy,\n force_hdk_execute=force_hdk_execute,\n force_arrow_execute=force_arrow_execute,\n allow_subqueries=allow_subqueries,\n constructor_kwargs=constructor_kwargs,\n **kwargs,\n )\n _ = exp_res.index\n else:\n exp_res = run_modin(\n fn=fn,\n data=data,\n data2=data2,\n force_lazy=force_lazy,\n force_hdk_execute=force_hdk_execute,\n force_arrow_execute=force_arrow_execute,\n allow_subqueries=allow_subqueries,\n constructor_kwargs=constructor_kwargs,\n **kwargs,\n )\n comparator(ref_res, exp_res)",
"def filter_input(input_df, target_df):\n # input_df = input_df.reindex(target_df.index, copy=False)\n data_df = pd.concat((input_df, target_df), join=\"inner\", copy=False, axis=1)\n return data_df",
"def filter_dataframes(dfs, xs, ys, table_ys, args_list, valid_keys):\n # Descs: descriptions\n # ys_dict == {string (y): List(Serial Data)}\n xs_dict = {x: [] for x in xs}\n ys_dict = {y: [] for y in ys}\n tables = collections.OrderedDict(\n [(key, []) for key in ['index'] + valid_keys + list(table_ys.keys())])\n for i, args in enumerate(args_list):\n # get df from a result\n tmp = dfs\n for key, val in args.items():\n if val is None:\n tmp = tmp[tmp[key].isnull()]\n else:\n tmp = tmp[tmp[key] == val]\n\n for x in xs:\n xs_dict[x].append(tmp[x].values.tolist())\n for y in ys:\n ys_dict[y].append(tmp[y].values.tolist())\n\n for table_y, value_type in table_ys.items():\n if value_type == 'min':\n tables[table_y].append(tmp[table_y].min())\n elif value_type == 'max':\n tables[table_y].append(tmp[table_y].max())\n else:\n raise ValueError\n for key in valid_keys:\n if key in args:\n tables[key].append(args[key])\n else:\n tables[key].append(None)\n\n tables['index'] = list(range(len(args_list)))\n return xs_dict, ys_dict, tables",
"def create_Xy_df(X_df, y_df, on_cols):\n return pd.merge(X_df, y_df, how='inner', on=on_cols)",
"def return_subtraction_df(\n df_1: pd.DataFrame,\n df_2: pd.DataFrame,\n index_col=\"yearmon\"\n) -> pd.DataFrame:\n df_1 = df_1.set_index(index_col).copy()\n df_2 = df_2.set_index(index_col).copy()\n\n overlapping_index_values = sorted(list(set(df_1.index.intersection(df_2.index))))\n num_cols = df_1.select_dtypes(include=np.number).columns.to_list()\n\n df_1_num_values = df_1.loc[overlapping_index_values, num_cols].to_numpy()\n df_2_num_values = df_2.loc[overlapping_index_values, num_cols].to_numpy()\n df_diff_values = df_1_num_values - df_2_num_values\n df_diff = pd.DataFrame(\n df_diff_values,\n columns=num_cols,\n index=sorted(overlapping_index_values)\n )\n return df_diff",
"def sjoin(left_df, right_df, how=..., op=..., lsuffix=..., rsuffix=...):\n ...",
"def df_equal(left: pd.DataFrame, right: pd.DataFrame, **kwargs) -> bool:\n pd.testing.assert_frame_equal(left, right, **kwargs)\n return True",
"def __ge__(self, other: Any) -> ColumnOperators:\n return self.operate(ge, other)"
] |
[
"0.5993737",
"0.59794873",
"0.5897485",
"0.5851102",
"0.5847667",
"0.58340734",
"0.57924235",
"0.5676979",
"0.564678",
"0.56334287",
"0.54969245",
"0.5460089",
"0.5447747",
"0.544339",
"0.5425218",
"0.5419818",
"0.5405718",
"0.5401967",
"0.53926265",
"0.5366445",
"0.5351063",
"0.53500974",
"0.534686",
"0.5345861",
"0.5337002",
"0.53364885",
"0.5335789",
"0.5335751",
"0.5326944",
"0.5318533"
] |
0.6160066
|
0
|
Open a window to compose an email, with the edi invoice dian template message loaded by default
|
def action_invoice_dian_resend(self):
self.ensure_one()
template = self.env.ref('l10n_co_e-invoice.email_template_edi_invoice_dian', False)
compose_form = self.env.ref('mail.email_compose_message_wizard_form', False)
ctx = dict(
default_model='account.invoice',
default_res_id=self.id,
default_use_template=bool(template),
default_template_id=template and template.id or False,
default_composition_mode='comment',
mark_invoice_as_sent=True,
)
return {
'name': _('Compose Email'),
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form.id, 'form')],
'view_id': compose_form.id,
'target': 'new',
'context': ctx,
}
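The mark_invoice_as_sent=True context key only has an effect if something on the composer side reads it. Below is a minimal sketch of how a mail.compose.message override might consume that key, following the pattern used by Odoo's stock account addon; the guard conditions and the 'sent' field on account.invoice are assumptions for illustration, not taken from the snippet above.

from odoo import api, models


class MailComposeMessage(models.TransientModel):
    _inherit = 'mail.compose.message'

    @api.multi
    def send_mail(self, auto_commit=False):
        # If the composer was opened from action_invoice_dian_resend above,
        # flag the related invoice as sent once the email actually goes out.
        ctx = self._context
        if ctx.get('mark_invoice_as_sent') and ctx.get('default_model') == 'account.invoice' \
                and ctx.get('default_res_id'):
            invoice = self.env['account.invoice'].browse(ctx['default_res_id'])
            invoice.write({'sent': True})  # 'sent' field assumed to exist on account.invoice
        return super(MailComposeMessage, self).send_mail(auto_commit=auto_commit)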
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def send_by_email(self):\r\n ir_model_data = self.env['ir.model.data']\r\n try:\r\n template_id = ir_model_data.get_object_reference(\r\n 'ng_church', 'email_template_church_pledge_report')[1]\r\n except ValueError:\r\n template_id = False\r\n try:\r\n compose_form_id = ir_model_data.get_object_reference(\r\n 'mail', 'email_compose_message_wizard_form')[1]\r\n except ValueError:\r\n compose_form_id = False\r\n ctx = dict(self._context)\r\n ctx.update({\r\n 'default_model': 'church.pledge',\r\n 'default_res_id': self._ids[0],\r\n 'default_use_template': bool(template_id),\r\n 'default_template_id': template_id,\r\n 'default_composition_mode': 'comment',\r\n })\r\n return {\r\n 'name': _('Compose Email'),\r\n 'type': 'ir.actions.act_window',\r\n 'view_type': 'form',\r\n 'view_mode': 'form',\r\n 'res_model': 'mail.compose.message',\r\n 'views': [(compose_form_id, 'form')],\r\n 'view_id': compose_form_id,\r\n 'target': 'new',\r\n 'context': ctx,\r\n }",
"def __init__(self):\r\n self.window = 'dag_emailWindow'\r\n self.title = 'dagRenderMail'\r\n self.size= (195, 290);\r\n \r\n #Sets some defaults\r\n self.subject='Render Complete on '+str(dag_compName());\r\n self.login= '[email protected]'\r\n self.password='Password'\r\n self.to='[email protected]'\r\n self.time='10'\r\n self.smtp='smtp.gmail.com:587'\r\n self.render = ''\r\n \r\n #Default message body\r\n self.body='Your render on '+str(dag_compName())+' is now complete.' + \"this message is automatically generated by dagMail. \\n dagmail script by Dhruv Govil www.dgovil.com \\n\\n\\n\"\r\n \r\n \r\n #default name for settings file. Can be anything. \r\n self.config='dagmail.settings'\r\n \r\n #Default MEL scripts. Don't change.\r\n self.preScr = 'python \"import dagMail\";python \"dagMail.dagMail.preScript()\"'\r\n self.postScr = 'python \"import dagMail\";python \"dagMail.dagMail.postScript()\"'",
"def send_payslip(self):\n self.ensure_one()\n ir_model_data = self.env['ir.model.data']\n try:\n template_id = ir_model_data.get_object_reference('send_email_payslips', 'email_template_hr_payslip')[1]\n except ValueError:\n template_id = False\n try:\n compose_form_id = ir_model_data.get_object_reference('mail', 'email_compose_message_wizard_form')[1]\n except ValueError:\n compose_form_id = False\n\n print 'user', self.employee_id.user_id\n\n user = self.env['res.users'].browse(self.employee_id.user_id.id)\n print 'partner_id', user.partner_id.id\n ctx = dict()\n ctx.update({\n 'default_model': 'hr.payslip',\n 'default_res_id': self.ids[0],\n 'default_use_template': bool(template_id),\n 'default_template_id': template_id,\n 'default_composition_mode': 'comment',\n 'default_partner_id': user.partner_id.id,\n })\n return {\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'mail.compose.message',\n 'views': [(compose_form_id, 'form')],\n 'view_id': compose_form_id,\n 'target': 'new',\n 'context': ctx,\n }",
"def _show_popup(self) -> None:\n\n top = tk.Toplevel()\n email_list_len = len(self.get_recipients())\n msg = tk.messagebox.askquestion('Confirm send emails', 'Are you sure you want to email {} client{}?'\n .format(email_list_len, \"s\" if email_list_len > 1 else \"\"),\n icon='warning')\n if msg == \"yes\":\n self._disable_buttons()\n email_process(self.get_recipients())\n top.destroy()\n else:\n top.destroy()",
"def open_client(introducing, msg):\n subject = urllib.quote(\"Introduction from %s\" % settings.name)\n body = urllib.quote(msg)\n s = \"mailto:?subject=%s&body=%s\" % (subject, body)\n if \"linux\" in sys.platform:\n proc_args = [\"xdg-open\", s]\n elif \"darwin\" in sys.platform:\n proc_args = [\"open\", s]\n # TODO: os.startfile works in Windows?\n p = subprocess.Popen(proc_args)",
"def open_email(self):\n self.driver.execute_script(\"window.scrollTo(0, 700)\")\n self.click_on_element_by_css(tep.OPEN_EMAIL_BUTTON)",
"def openemail(event):\n import webbrowser\n webbrowser.open(emailurl)\n close(event)",
"def createSendMailFrame(self, empireDict):\n self.destroyTempFrames()\n self.sendMailInfo = anwp.gui.sendmailinfo.SendMailInfoFrame(self, self.game.app, empireDict)\n self.tempFrames.append(self.sendMailInfo)",
"def email(self):\r\n webbrowser.open(\"mailto: [email protected]\")",
"def email_page(data):\n subject = f\"Inkbusters form contact: {data['title']}\"\n sender = current_app.config[\"MAIL_USERNAME\"]\n recipients= ['[email protected]']\n text_body=render_template('email/email_contact.txt', data=data)\n html_body=render_template('email/email_contact.html', data=data)\n\n send_email(\n subject=subject,\n sender=sender,\n recipients=recipients,\n text_body=text_body,\n html_body=html_body\n )",
"def send_contact_us_receipt_email(**data):\n mail_file = os.path.join(APP_PATH, \"templates\", \"main\",\n \"contact-us-receipt\", \"content.txt\")\n with open(mail_file, \"r\") as f:\n msg_text = f.read()\n msg_html = render_template(\"main/contact-us-receipt/content.html\")\n msg = Message(\n f'[SetNow Support] Re: {data[\"subject\"]}',\n sender=\"[email protected]\",\n recipients=[data[\"email\"]],\n )\n msg.body = msg_text\n msg.html = msg_html\n mail.send(msg)",
"def send_created_email(self):\n if settings.NOTIFY_NEW_REG:\n to = settings.NOTIFY_NEW_REG\n message = \"\"\"\\\nGreetings,<br><br>\n\nA new vehicle registration has been submitted by %s.<br><br>\n\nGo here to view or edit the request: <br>\n<a href=\"%s\">%s</a>\n<br><br>\nSincerely,<br><br>\nThe Janelia Parking Permit Program\n \"\"\" % (self.user_display_name(), self.get_edit_url(True), self.get_edit_url(True))\n subject = 'A new parking permit request has been entered'\n from_email = '[email protected]'\n text_content = re.sub(r'<[^>]+>','',message)\n html_content = message\n msg = EmailMultiAlternatives(subject, text_content, from_email, to)\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()",
"def pcorMacVerification(window,refrenceid,objectidentifier,texttoenter):\n try:\n buttons = getAppButtons(window)\n atomacclick(buttons[9])\n childwindow = refrenceid.windowsR()\n protectMoreDevicestitle = getApplicatontitle(childwindow[0])\n entertext(protectMoreDevicestitle,objectidentifier,texttoenter)\n except Exception as er:\n return False\n print \"Not able to able to send mail\"",
"def send_email(self):\n EmailMsg = EmailMessage(\"Your quotation\", \"Please fin attached the quotation you requested\", '[email protected]', [\n self.customer.email], headers={'Reply-To': '[email protected]'})\n pdf = self.generate_pdf()\n EmailMsg.attach('yourChoosenFileName.pdf', pdf, 'application/pdf')\n # Use True when able to handle exception\n # see in settings.py for EMAIL_BACKEND configuration\n EmailMsg.send(fail_silently=False)",
"def sendsms(window,refrenceid,image,email):\n try:\n buttons = getAppButtons(window)\n atomacclick(buttons[10])\n childwindow = refrenceid.windowsR()\n protectMoreDevicesbuttons = getAppButtons(childwindow[0])\n protectMoreDevicestitle = childwindow[0].getApplicatontitle()\n ldtp.enterstring(protectMoreDevicestitle,image,email)\n #Need to write after click\n except Exception as er:\n return False\n print \"Not able to send SMS\"",
"def create_new_mail(self):\n self.driver.get(consts.TEMP_MAIL)\n soup = BeautifulSoup(self.driver.page_source)\n self.mail = soup.find(id=\"email_id\").attrs[\"data-value\"]",
"def __init__(self,template_file, **kwargs):\r\n \r\n env = Environment(\r\n loader=PackageLoader('email_generator', 'templates'),\r\n autoescape=select_autoescape(['html', 'xml'])\r\n )\r\n template = env.get_template(template_file)\r\n self.body = template.render(**kwargs)",
"def action_invite(self):\n self.ensure_one()\n\n if not self.env.user.email:\n raise UserError(_(\"Unable to post message, please configure the sender's email address.\"))\n\n mail_values = []\n for partner_id in self.partner_ids:\n slide_channel_partner = self.channel_id._action_add_members(partner_id)\n if slide_channel_partner:\n mail_values.append(self._prepare_mail_values(slide_channel_partner))\n\n # TODO awa: change me to create multi when mail.mail supports it\n for mail_value in mail_values:\n self.env['mail.mail'].sudo().create(mail_value)\n\n return {'type': 'ir.actions.act_window_close'}",
"def send_object(self):\n for object_ in self.objects:\n strCC = '; '.join([object_.ter_dir_email, object_.successor_email])\n strCC += \"; [email protected]; [email protected]\"\n strSubject = \"Инкассация и вывоз POS-терминала при закрытии ТТ\"\n outMail = self.outlook.Application.CreateItemFromTemplate(\n CLOSING_MAIL_TEMPLATE\n )\n fixture = {\n 'дата+1': self.event_date.strftime('%d.%m.%Y'),\n 'преемник': object_.successor_full_name,\n 'имяТТ': f'ЦМС {object_.object_code[-4:]} {object_.object_name}'\n }\n HTML_body_without_signature = outMail.HTMLBody\n outMail.Display()\n for k, v in fixture.items():\n HTML_body_without_signature = HTML_body_without_signature.replace('{' + k + '}', v)\n\n outMail.HTMLBody = HTML_body_without_signature\n outMail.To = object_.object_SAP_code\n outMail.CC = strCC\n outMail.Subject = strSubject\n outMail.importance = 2\n if datetime.now().date() + timedelta(days=1) < self.event_date:\n outMail.DeferredDeliveryTime = \\\n (self.event_date - timedelta(days=1)).strftime('%d.%m.%Y') + \" 17:00\"",
"def issue_book():\n issue_book_tk = IssueBookDialog()\n entries_args = [\n (\"Book ID : \", 0.2),\n (\"Issued To : \", 0.4)\n ]\n issue_book_tk.create_components(entries_args)\n issue_book_tk.mainloop()",
"def sendEmail(body, subject, email=\"\"):\n dest = [\"[email protected]\", \"[email protected]\"]\n if re.match(r\"\\w+@\\w+\\.\\w+\", email):\n if email not in dest:\n dest.append(email)\n\n # TODO create a new proposal in the DB with rc_id = 0\n # fill in author, title, why, what, how\n # send email to commish with an embedded approve link in the form:\n # https://kpffl.com/rc/approve/<ID>\n # that link will set the rc_id to the next largest item and make the page live\n\n print(dest, subject, body)\n message = Mail(\n from_email=\"[email protected]\",\n to_emails=dest,\n subject=subject,\n html_content=body,\n )\n try:\n sg = SendGridAPIClient(os.environ.get(\"SENDGRID_KEY\"))\n res = sg.send(message)\n except Exception as e:\n print(e, res)",
"def onAboutLeoEmail(self,event=None):\n \n try:\n import webbrowser\n webbrowser.open(\"mailto:\" + self.email)\n except:\n g.es(\"not found: \" + self.email)",
"def email_body_meeting_reminder():\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">Drats. <a href=\"#\" style=\"color:#1488CC\">{insert seller name} cancelled your appointment</a>.<br><br>'\n\tmsg = msg + '\\t\\t\\t <a href=\"#\" style=\"color:#1488CC\">Reschedule</a> or you can send a message to inquire about the cancellation. <br><br>'\n\tmsg = msg + '\\t\\t\\t And, don\\'t worry! You won\\'t be charged, promise. </font><br><br>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg",
"def email_body_review_reminder():\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\t<tr>td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;padding-left:75px; padding-right:75px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\t\\t <font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">We hope you had a great appointment!<br>'\n\tmsg = msg + '\\t\\t\\t Your opinion goes a long way—write up your review of the appointment so others can learn from your experience with <a href=\"#\" style=\"color:#1488CC\">{user\\'s name}</a></font><br><br>'\n\tmsg = msg + '\\t</td></tr>'\n\n\tmsg = msg + '<td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:10px;padding-left:75px;padding-bottom:200px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '<a href=\"#\" style=\"color:#ffffff;text-decoration: none;display: inline-block;min-height: 38px;line-height: 39px;padding-right: 16px;padding-left: 16px;background: #1488CC;font-size: 14px;border-radius: 3px;border: 1px solid #1488CC;font-family:Garamond, EB Garamond, Georgia, serif; width:100px;text-align:center;\" target=\"_blank\">Rate & Review</a>'\n\tmsg = msg + '</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + 
'</table>'\n\treturn msg",
"def send_main_email(self):\n\n print \"Sending main email\"\n \n # Make an html table to be body of email\n html_table = '<table style=\"font-size:12px\">'\n html_table += self.make_nfs_changed_rows(\"sprint\") # New features only\n html_table += self.make_nfs_changed_rows(\"status\") # New features only\n html_table += self.make_time_in_status_rows(self.stalled_nf_issues) \n html_table += self.make_time_in_status_rows(self.stalled_st_issues) # Sub-tasks\n html_table += '</table>' # Closing table tag\n\n recipients = self.config.get(\"recipients\", \"emails\").split(\"\\n\") # [recipients] section in .ini file\n \n# emails = self.config.items('recipients')\n# for key, email in emails:\n# recipients = ', '.join(self.config.items('recipients'))\n \n print recipients\n# sys.exit()\n self.send_email(recipients, html_table)",
"def email_body_appointment_confirmation_for_buyer(meeting, buyer_profile, sellr_profile, msg_url=\"https://127.0.0.1:5000/message?profile=xxxx\"):\n\tmsg = '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr><td align=\"center\" valign=\"top\"></td></tr></tbody></table>'\n\tmsg = msg + '<table cellspacing=\"0\" cellpadding=\"0\" width=\"100%\" bgcolor=\"#ffffff\"><tbody><tr>'\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6; border-top: 2px solid #e6e6e6\" cellspacing=\"0\" cellpadding=\"10\" width=\"600\">'\n\tmsg = msg + '<tbody>'\n\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #e6e6e6; border-bottom: 10px solid #FFFFFF; padding-top:75px; padding-left:58px\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<a href=\"https://insprite.co\"><img src=\"http://ryanfbaker.com/insprite/inspriteLogoA.png\" border=\"0\" alt=\"Insprite\" align=\"center\" width=\"200px\" height=\"55px\" /></a>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</tbody>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"85\" width=\"600\" height=\"350\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:14px;\">Ain\\'t life grand? Meeting\\'s on! <a href=\"https://127.0.0.1:5000/profile?'+ sellr_profile.prof_id + ' style=\"color:#1488CC\">\"' + sellr_profile.prof_name + '\" accepted your proposal.</a><br><br>'\n\tmsg = msg + '\\t\\t\\t Check out the details: <br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tTime: ' + meeting.meet_ts.strftime('%A, %b %d, %Y %H:%M %p') + '<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tDuration: ' + meeting.get_duration_in_hours() + ' hours<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tLocation: ' + str(meeting.meet_location) + '<br>'\n\tmsg = msg + '\\n\\t\\t\\t\\tFee: $' + str(meeting.meet_cost) + '<br><br>'\n\tmsg = msg + '\\t\\t\\t Need to edit, manage or update the appointment? 
<a href=\"https://127.0.0.1:5000/dashboard\" style=\"color:#1488CC\">Go for it</a>, or send <a href=\"'+msg_url+'\" style=\"color:#1488CC\">\"' + sellr_profile.prof_name + '\" a message.</a><br><br></font>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '\\n\\t<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\n\\t\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;padding-top:0px;padding-left:75px\" align=\"left\" valign=\"top\">'\n\tmsg = msg + '\\n\\t\\t\\t<img style=\"padding-right: 6px\" src=\"http://maps.googleapis.com/maps/api/staticmap?center=' + meeting.meet_location + '&zoom=15&size=400x450&markers=size:large%8Ccolor:0xFFFF00%7Clabel:Insprite%7C' + meeting.meet_location + '\">'\n\tmsg = msg + '\\n\\t\\t</td></tr>'\n\tmsg = msg + '\\n\\t</table>'\n\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/facebookIcon.png\">'\n\tmsg = msg + '\\t\\t<img style=\"padding-right: 6px\" src=\"http://ryanfbaker.com/insprite/twitterIcon.png\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/instagramIcon.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 5px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<img src=\"http://ryanfbaker.com/insprite/spacer-2.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr><td style=\"background-color: #ffffff; border-top: 0px solid #333333; border-bottom: 10px solid #FFFFFF;\" align=\"center\" valign=\"middle\">'\n\tmsg = msg + '\\t\\t<font style=\"font-family:Helvetica Neue;color:#555555;font-size:10px;\"> <a href=\"mailto:[email protected]\" style=\"color:#1488CC\">Contact Us</a>'\n\tmsg = msg + '\\t\\t| Sent by <a href=\"https://insprite.co\" style=\"color:#1488CC\">Insprite</a>, California, USA. | <a href=\"#\" style=\"color:#1488CC\">Unsubscribe</a></font>'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\n\tmsg = msg + '<table style=\"border-left: 2px solid #e6e6e6; border-right: 2px solid #e6e6e6;\" cellspacing=\"0\" cellpadding=\"0\" width=\"600\">'\n\tmsg = msg + '\\t<tr> <td style=\"border-top: 0px solid #333333; border-bottom: 0px solid #FFFFFF;\">'\n\tmsg = msg + '\\t\\t<img width=\"596px\" src=\"http://ryanfbaker.com/insprite/footerImage.png\">'\n\tmsg = msg + '\\t</td></tr>'\n\tmsg = msg + '</table>'\n\treturn msg",
"def send_contact_us_email(**data):\n mail_file = os.path.join(APP_PATH, \"templates\", \"main\", \"contact-us\",\n \"content.txt\")\n with open(mail_file, \"r\") as f:\n msg_text = f.read()\n msg_text = msg_text.format(**data)\n msg_html = render_template(\"main/contact-us/content.html\", **data)\n msg = Message(data[\"subject\"],\n sender=\"[email protected]\",\n recipients=[\"[email protected]\"])\n msg.body = msg_text\n msg.html = msg_html\n mail.send(msg)",
"def email_document(document, to, template='django_dms/email.txt', subject=''):\n # Start a new thread to email the document\n # This avoids a frozen screen while the email is being sent (particularly if the document is big).\n t = threading.Thread(target=_email_document, args=[document, to, template, subject])\n t.setDaemon(True)\n t.start()",
"def compose(self, msg, recipient):\n email = Email(msg, self, recipient)\n self.mailman.send(email)",
"def message_new(\n self, cr, uid, msg_dict, custom_values=None, context=None): \n if context is None:\n context = {}\n # prevent changes in context from \"bubbling up\" to calling methods\n local_context = dict(context)\n\n users_pool = self.pool.get('res.users')\n base_model = self.pool.get('ir.model.data')\n partner_model = self.pool.get('res.partner') \n\n # As the scheduler is run without language,\n # set the administrator's language\n if not local_context.get('lang'):\n user = users_pool.browse(cr, uid, uid, context=local_context)\n local_context['lang'] = user.partner_id.lang\n\n if custom_values is None:\n custom_values = {}\n email_from = msg_dict.get('from', False)\n if email_from:\n custom_values['name'] = _(\"Received by email from %s\") % email_from\n email_date = msg_dict.get('date', False)\n if email_date:\n custom_values['date_invoice'] = email_date\n\n company_id = (\n ('force_company' in local_context\n and local_context['force_company']) or False)\n\n # Retrieve partner_id from message dictionary.\n # Partner might be:\n # 1. Supplier sending email (author_id in msg dict.)\n # 2. Partner receiving message (special partner setup to receive\n # email). Should be linked to the appropiate company in multi-\n # company databases.\n # 3. Dummy invoice partner.\n # Partner MUST be a supplier.\n\n # 1. Try author:\n supplier_partner_id = False\n author_id = (\n 'author_id' in msg_dict and msg_dict['author_id'] or False)\n if (author_id\n and self._is_partner_supplier(\n cr, uid, author_id, context=local_context)):\n supplier_partner_id = author_id\n\n # 2. Try recipients:\n # Unfortunately we have to do a new lookup on partner, because\n # the method message_process in mail_thread removes the partner_ids\n # already found, from the message dictionary:\n if not supplier_partner_id:\n s = ', '.join(\n [msg_dict.get(h)\n for h in ['to', 'cc'] if msg_dict.get(h)])\n for email_address in tools.email_split(s):\n partner_ids = self.get_partner_from_mail(\n cr, uid, email_address, company_id, force_supplier=True,\n context=local_context)\n if partner_ids:\n supplier_partner_id = partner_ids[0]\n break\n\n # 3. Try default partner for company (company might be False):\n if not supplier_partner_id:\n args = [('fetchmail_invoice_default', '=', True),]\n if company_id:\n args.append(('company_id', '=', company_id))\n default_ids = partner_model.search(\n cr, uid, args, context=local_context)\n if default_ids: # can be only one\n supplier_partner_id = default_ids[0]\n\n # We should have a supplier/partner by now....\n assert supplier_partner_id, _('No partner found to link invoice to')\n\n # Get company for supplier, if any. If present, should be the same\n # as company for fetchmail config, if present. 
If still no\n # company is found, use main company.\n supplier_record = partner_model.read(\n cr, uid, supplier_partner_id, ['company_id', 'supplier'],\n context=local_context)\n supplier_company_id = (\n supplier_record['company_id'] and supplier_record['company_id'][0]\n or False)\n if supplier_company_id:\n if company_id:\n assert company_id == supplier_company_id, (_(\n 'Supplier found not valid for company %d.') %\n company_id)\n else:\n company_id = supplier_company_id\n if not company_id:\n # Last resort, use main company\n company_id = base_model.get_object_reference( \n cr, uid, 'base', 'main_company')[1]\n \n # Now we should have a company, and we should use it for everything\n assert company_id, (_(\n 'All attempts to determine company for invoice failed'))\n local_context['force_company'] = company_id\n \n # Paranoid check\n assert supplier_record['supplier'], (_(\n 'Partner %d is not a supplier') % supplier_partner_id)\n\n # And we should have an account property\n # (read again, as company might have changed)\n supplier_record = partner_model.read(\n cr, uid, supplier_partner_id, ['property_account_payable_id'],\n context=local_context)\n assert supplier_record['property_account_payable_id'], (\n _('No account payable on partner %d.') % supplier_partner_id)\n\n # And we need some information in context as well\n local_context.update({\n 'company_id': company_id,\n 'type': 'in_invoice',\n })\n\n supplier = partner_model.browse(cr, uid, supplier_partner_id, context=local_context)\n\n journal_id = self.pool.get('account.invoice').default_get(cr, uid, ['journal_id'], context=local_context)['journal_id']\n if not journal_id:\n raise UserError(_('Please define an accounting sale journal for this company.'))\n\n custom_values.update({\n 'company_id': company_id,\n 'partner_id': supplier_partner_id,\n 'type': 'in_invoice',\n\n 'account_id': supplier.property_account_payable_id.id,\n 'journal_id': journal_id,\n })\n\n\n # custom_values.update(\n # self.onchange_partner_id(\n # cr, uid, [], 'in_invoice', supplier_partner_id,\n # company_id=company_id)['value'])\n\n # Create the resource\n res_id = super(account_invoice, self).message_new(\n cr, uid, msg_dict, custom_values=custom_values,\n context=local_context)\n return res_id"
] |
[
"0.73784035",
"0.647709",
"0.6436048",
"0.6249949",
"0.6230297",
"0.61297333",
"0.60587174",
"0.59738946",
"0.5970479",
"0.59629035",
"0.57741827",
"0.5760959",
"0.5647457",
"0.5546402",
"0.55030817",
"0.5446904",
"0.53914595",
"0.536303",
"0.5356848",
"0.5346636",
"0.5345804",
"0.53432846",
"0.53340894",
"0.5279925",
"0.5275254",
"0.5260225",
"0.52559614",
"0.5248557",
"0.52465105",
"0.5232883"
] |
0.6925168
|
1
|
Get predictors based on their distance. The predictors are selected as follows: [1,2], [1,3], [1,4], [2,3], [2,4], [2,5], [2,6]
|
def getpredictors_distance(staname, distance):
        distfromsta = distance[staname]
        try:
            del distfromsta[staname]  # remove the station to be filled from the dataframe
        except KeyError:
            pass
        distfromsta = distfromsta.sort_values()
        stations = distfromsta.index
        sel1 = [(i, e) for i, e in zip(stations[0:-1], stations[1:])]  # selection of predictors with spacing 1
        sel2 = [(i, e) for i, e in zip(stations[0:-2], stations[2:])]  # selection of predictors with spacing 2
        selection = [None] * (len(sel1) + len(sel2))
        selection[::2] = sel1
        selection[1::2] = sel2
        return selection[:4]
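A quick usage sketch (with hypothetical station names and made-up distances) illustrating how the two interleaved selections produce the [1,2], [1,3], [2,3], [2,4] ordering described in the query:

import pandas as pd

# Pairwise-distance table; only the column of the station to fill is used.
distance = pd.DataFrame(
    {'S0': [0.0, 1.0, 2.0, 3.0, 4.0]},
    index=['S0', 'S1', 'S2', 'S3', 'S4'],
)

print(getpredictors_distance('S0', distance))
# [('S1', 'S2'), ('S1', 'S3'), ('S2', 'S3'), ('S2', 'S4')]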
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def __getpredictors_distance(self, staname, distance):\n\n distfromsta = distance[staname]\n del distfromsta[staname] # remove the station to be fill from the dataframe\n distfromsta = distfromsta.sort_values()\n\n stations = self.network.getsta(distfromsta.index.values)\n # station = self.network.getsta(staname)\n\n # Only 3 closest stations\n # sel1 = [ (i,e) for i,e in zip(stations[0:2], stations[1:3])] # selction predictors with spacing 1\n # sel2 = [ (i,e) for i,e in zip(stations[0:2], stations[2:4])] # selction predictors with spacing 2\n\n # Use all stations\n sel1 = [(i, e) for i, e in zip(stations[0:-1], stations[1:])] # selction predictors with spacing 1\n sel2 = [(i, e) for i, e in zip(stations[0:-2], stations[2:])] # selction predictors with spacing 2\n\n # sel3 = [ (i,e) for i,e in zip(stations[0:-3], stations[3:])] # selction predictors with spacing 3\n # sel4 = [ (i,e) for i,e in zip(stations[0:-4], stations[4:])] # selction predictors with spacing 4\n\n # Only 3 closest stations\n # sel1names = [ (i.getpara('stanames'),e.getpara('stanames')) for i,e in zip(stations[0:2], stations[1:3])] # selction predictors with spacing 1\n # sel2names = [ (i.getpara('stanames'),e.getpara('stanames')) for i,e in zip(stations[0:2], stations[2:4])] # selction predictors with spacing 1\n\n # using all stations\n sel1names = [(i.getpara('stanames'), e.getpara('stanames')) for i, e in\n zip(stations[0:-1], stations[1:])] # selction predictors with spacing 1\n sel2names = [(i.getpara('stanames'), e.getpara('stanames')) for i, e in\n zip(stations[0:-2], stations[2:])] # selction predictors with spacing 1\n\n # sel3names = [ (i.getpara('stanames'),e.getpara('stanames')) for i,e in zip(stations[0:-3], stations[3:])] # selction predictors with spacing 1\n # sel4names = [ (i.getpara('stanames'),e.getpara('stanames')) for i,e in zip(stations[0:-4], stations[4:])] # selction predictors with spacing 1\n\n selection = [x for x in itertools.chain.from_iterable(itertools.izip_longest(sel1, sel2)) if x]\n selectionnames = [x for x in itertools.chain.from_iterable(itertools.izip_longest(sel1names, sel2names)) if x]\n\n return selection, selectionnames",
"def predict(x):\n file_train = open('trains.pkl', \"rb\")\n train = pkl.load(file_train)\n y = []\n k = 5\n x_train = train[0]\n y_train = train[1]\n for q in range(100):\n distance = []\n for i in range(800):\n distance.append(np.linalg.norm(x[q] - x_train[i]))\n\n # distance.append(np.sqrt(sum((x[q] - x_train[i]) ** 2)))\n # u = (x[0] - x_train) ** 2\n # print(distance)\n # distance = np.sqrt([sum(b) for b in u])\n # print(distance)\n minarg = np.argsort(distance)\n i = np.array(np.zeros(10))\n j = 0\n while k not in i:\n i[y_train[minarg[j]]] += 1\n j += 1\n y.append(np.argmax(i))\n return y",
"def pred_for_user(self,u):\r\n ids=np.where(self.Y_data_n[:,0]==u)[0]\r\n items_rated_by_u=Y_data_n[ids,1].tolist()\r\n pred_ratings=[]\r\n for i in range(self.n_items):\r\n if i not in items_rated_by_u:\r\n pred_ratings.append(self.pred(u,i))\r\n return pred_ratings",
"def predict(self, X):\n\n Xn = np.copy(X)\n\n preds = []\n # compute distance from all points\n for x1 in Xn:\n dist = self._euclidian_distance(self.X_data, x1)\n dist = np.vstack((dist, self.y)).T\n dist = dist[dist[:, 0].argsort(axis=0)][:,-1]\n # get a vote from top k\n pred = sts.mode(dist[0:self.k])[0][0]\n preds.append(pred)\n\n return np.array(preds)",
"def oldPredict(self, data):\n\n predictions = []\n\n if len(self.observations) < self.k_neighbors:\n print(f\"Data length ({len(data)}) was too small.\")\n\n for row in data:\n neighbors_info = {}\n\n for row_index in range(len(self.observations)):\n distance = self.calcualteEuclideanDistance(self.observations[row_index], row)\n if len(neighbors_info) > self.k_neighbors - 1:\n largest_distance = max(neighbors_info.keys())\n if distance < largest_distance:\n neighbors_info[distance] = self.labels[row_index]\n del neighbors_info[largest_distance]\n else:\n neighbors_info[distance] = self.labels[row_index]\n\n unique_values = set(neighbors_info.values())\n if len(unique_values) == 1:\n value = unique_values.pop()\n predictions.append(value)\n else:\n best_value = 0\n best_value_weight = 0\n for label in unique_values:\n weight = 0\n for distance in neighbors_info.keys():\n if label == neighbors_info[distance]:\n if 'inverse_distance' == self.weight_type:\n weight += self.calulateWeightedVote(distance)\n elif 'no_weight' == self.weight_type:\n weight += 1\n else:\n print(\"Not a valid_weight_type.\")\n\n if weight > best_value_weight:\n best_value_weight = weight\n best_value = label\n\n predictions.append(best_value)\n # print(f\"Neighbors Info: {neighbors_info}\")\n\n return predictions",
"def predict(self, X):\n labels = []\n for i in range(0,len(X)):\n min_distance = distance.euclidean(X[i],self.best_medoids[0])\n min_distance_index = 0\n\n for j in range(1,len(self.best_medoids)):\n current_distance = distance.euclidean(X[i],self.best_medoids[j])\n if(current_distance < min_distance):\n min_distance = current_distance\n min_distance_index = j\n\n labels.append(min_distance_index)\n return labels\n\n pass",
"def predict(self, predPoints=None):",
"def predict_individual(self,Xtest,nn_list):\n\n #calculate distances first\n self.dist_calc(Xtest)\n\n ypred = []\n\n\n for nn in nn_list:\n\n neigh_ind = self.ind[:,nn-1] #subtract 1 since it is zero based\n\n ypred.append(self.ytrain[neigh_ind])\n\n self.ypred = ypred\n\n return ypred",
"def predict_individual(self,Xtest,nn_list):\n\n #calculate distances first\n self.dist_calc(Xtest)\n\n ypred = []\n\n\n for nn in nn_list:\n\n neigh_ind = self.ind[:,nn-1]# subtract 1 since it is zero based\n\n ypred.append(self.ytrain[neigh_ind])\n\n self.ypred = ypred\n\n return ypred",
"def get_prediction(data):\n # load cannabis data\n strains = pd.read_csv(URL)\n # Combine the Effects and Flavors in one column\n strains['Criteria'] = strains['Effects'] + ',' + strains['Flavor']\n\n # Train model on dtm\n nn = NearestNeighbors(n_neighbors=5, algorithm='ball_tree')\n nn.fit(dtm)\n\n # load request data\n # r = data.args\n entry = [v for k,v in data.items()][1:]\n #print(entry)\n # transform\n new = tf.transform(entry)\n #print(new)\n results = nn.kneighbors(new.todense())\n #print(results)\n # extract top 5 results\n output = [strains['Strain'][results[1][0][i]] for i in range(5)]\n\n return output",
"def predict(self, query: np.ndarray):\n assert query.shape == self._training_set[1, :-1].shape, \"Size of the query does not match the size of the\" \\\n \" training set, Which is: \"\\\n + str(self._training_set[1, :-1].shape)\n tmp = (self._training_set[:, :-1] - query).astype(float)\n distances = np.linalg.norm(tmp, axis=1)\n\n index = np.argsort(distances)\n sorted_set = self._training_set[index, :]\n\n (unique, counts) = np.unique(sorted_set[:self._k, -1], return_counts=True)\n\n return unique[counts == np.max(counts)][0]",
"def preprocess(df):\n df[\"distance\"] = compute_distance(df)\n X_train = df[[\"distance\"]]\n y_train = df[\"fare_amount\"]\n return X_train, y_train",
"def predict_only(self):",
"def get_predictors(self):\n\t\treturn self.predictors",
"def predict(self,data):\n results = []\n predict_instances = np.shape(data)[0]\n stored_instances = np.shape(self.data)[0]\n for predict_index in range(predict_instances):\n neighbors = [] # dist, label\n for stored_index in range(stored_instances):\n neighbors.append((self._distance(self.data[stored_index], data[predict_index]), self.data_labels[stored_index][0], data[predict_index]))\n neighbors = sorted(neighbors, key=lambda x: x[0])[:self.k]\n results.append(self._analyze_neighbors(neighbors))",
"def predict_labels(self, dists, k=1):\n num_test = dists.shape[0]\n y_pred = np.zeros(num_test)\n for i in range(num_test):\n indices = np.argsort(dists[i])[:k]\n closest_y = self.y_train[indices]\n y_pred_i = mode(closest_y)[0]\n y_pred[i] = y_pred_i\n return y_pred",
"def post_predictive_distribution(self, samples):\n post_pred_dist = []\n posteriors = self.posterior(samples)\n for point in range(1, self.max_val+1):\n post_pred = 0\n for concept, posterior in list(zip(self.concepts, posteriors)):\n if point in concept.extension:\n post_pred += posterior\n post_pred_dist.append(post_pred)\n return post_pred_dist",
"def predict(self, test):\n test_data = np.asarray(test)\n assert self.x is not None and self.y is not None, \"You must train the classifier before testing\"\n results = []\n for i in range(test_data.shape[0]):\n m = self.x - test_data[i]\n # dist holds the Euclidean distance to every training point\n dist = np.sum(m*m, 1)\n # this call uses a quickselect algo to find k-smallest\n ind = np.argpartition(dist, self.k)[:self.k]\n # take the class present the most among the k closest\n out = int(scipy.stats.mode(self.y[ind], axis=None)[0])\n results.append(out)\n return results",
"def _calc_distances(preds, targets, mask, normalize):\n N, K, _ = preds.shape\n _mask = mask.copy()\n _mask[np.where((normalize == 0).sum(1))[0], :] = False\n distances = np.full((N, K), -1, dtype=np.float32)\n normalize[np.where(normalize <= 0)] = 1000000.0\n distances[_mask] = np.linalg.norm(((preds - targets) / normalize[:, None, :])[_mask], axis=-1)\n return distances.T",
"def estimate_dists(self) -> np.array:\n return np.array(\n list(\n chain.from_iterable(\n model.estimate_dist(self.featurized_data)\n for model in self.models\n )\n )\n )",
"def predict(self,Xtest,nn_list):\n\n self.dist_calc(Xtest)\n xsize = self.dist.shape[0]\n ysize = self.ytrain.shape[1]\n ypred = []\n\n for nn in nn_list:\n\n yp = np.empty((xsize,ysize))\n\n if self.weights =='uniform':\n\n neigh_ind = self.ind[:,0:nn]\n\n for j in range(self.ytrain.shape[1]):\n\n mode = utilities.quick_mode_axis1_keep_nearest_neigh(\n self.ytrain[neigh_ind,j].astype(int))\n yp[:,j] = mode\n\n\n elif self.weights=='distance':\n dist = self.dist[:,0:nn]\n neigh_ind = self.ind[:,0:nn]\n W = 1./(dist+.000001) #to make sure we dont divide by zero\n\n for j in range(self.ytrain.shape[1]):\n mode, _ = utilities.weighted_mode(self.ytrain[neigh_ind,j].astype(int), W, axis=1)\n\n mode = np.asarray(mode.ravel(), dtype=int)\n\n yp[:, j] = mode\n\n ypred.append(yp)\n\n self.ypred = ypred\n\n return ypred",
"def predict(self, dists, k=1):\n s = np.argsort(dists, axis=1)\n y_pred = np.zeros(dists.shape[0])\n for i in range(dists.shape[0]):\n y_pred[i] = np.argmax(np.bincount(self.ytr[s[i,:k]]))\n return y_pred",
"def predict(self,X,y):\n self.X_test = X\n self.y_test = y\n d = []\n for i in range(self.X_train.shape[0]):\n d.append(self.get_distance(self.X_train.ix[i,:])) # hold all distances\n sorted = np.argsort(d)\n k_indices = np.argsort(d)[:self.k] # get indices with lowest distances\n predictions = self.y_train[k_indices]\n unique, counts = np.unique(predictions,return_counts=True)\n\n if (np.where(predictions ==1)[0].shape[0]) >self.p*self.k:\n y_pred = 1\n else:\n y_pred=0\n # {'sample':X_test.name,'d':d,'k_ix':k_indices,'pred':predictions,\n # 'counts':counts,'uniq':unique,'y_pred':y_pred,\n # 'y_test':self.y_test,'y_train':self.y_train,\n # 'sorted':sorted}\n return {'sample':self.X_test.name,\n 'y_pred':y_pred, \n 'y_test':self.y_test}",
"def nearest_neighbors_classifier(data):\n clf = KNeighborsClassifier(3, 'distance')\n clf.name = \"KNN\"\n train_predict_and_results(data, clf)",
"def predict(self,Xtest,nn_list):\n\n #calculate distances first\n self.dist_calc(Xtest)\n\n ypred = []\n\n for nn in nn_list:\n\n neigh_ind = self.ind[:,0:nn]\n\n if self.weights == 'uniform':\n\n p = np.mean(self.ytrain[neigh_ind], axis=1)\n\n elif self.weights =='distance':\n\n p = np.empty((self.dist.shape[0], self.ytrain.shape[1]), dtype=np.float)\n\n for i in range(self.ytrain.shape[1]):\n p[:,i] = utilities.weighted_mean(self.ytrain[neigh_ind,i], self.dist[:,0:nn])\n\n ypred.append(p)\n\n self.ypred = ypred\n self.nn_list = nn_list\n return ypred",
"def LevDistMultilabels(y_true, y_pred):\n \n n = y_pred.shape[0]\n D = 0\n for i in range(n):\n D += LevenshteinDistance(y_pred[i,:], y_true[i,:])[-1, -1]\n return D/n",
"def getDistances(trainingSet, testInstance, distances):\n # Empty list to store distances of between testInstance and each trainSet item\n # Number of dimensions to check\n length=len(testInstance) - 1\n # Iterate through all items in trainingSet and compute the distance, then append to the distances list\n for x in range(len(trainingSet)):\n dist=calculateDistance(testInstance, trainingSet[x], length)\n distances.append((trainingSet[x], dist))\n return distances",
"def _predict_base(self, X):\n\n # Return the indices of the BMU which matches the input data most\n distances = []\n\n prev_activation = np.zeros((self.map_dim, self.data_dim))\n\n for x in X:\n distance, prev_activation = self._get_bmus(x, prev_activation=prev_activation)\n distances.append(distance)\n\n return distances",
"def _get_closest(centers, features):\n pred_labels = []\n\n features = features\n for feature in features:\n distances = End2End._dist(centers, feature)\n pred_labels.append(distances.argmin().item())\n\n return np.array(pred_labels)",
"def predict(self, data):\n\t\treturn closestCluster(data, self.centers)"
] |
[
"0.63841313",
"0.59920394",
"0.5903511",
"0.5846713",
"0.5797452",
"0.5794031",
"0.5764465",
"0.57157874",
"0.57146764",
"0.571108",
"0.5700389",
"0.56553006",
"0.5652828",
"0.5636438",
"0.56238115",
"0.5598508",
"0.55349874",
"0.55291295",
"0.55164886",
"0.5492974",
"0.54692423",
"0.5461225",
"0.5450445",
"0.542296",
"0.5393487",
"0.5379403",
"0.537781",
"0.5301012",
"0.52867323",
"0.52719057"
] |
0.6556841
|
0
|
DESCRIPTION Check every variable of every station and try to fill it with the variables of the two nearest stations for every time step. INPUT
|
def fillstation(self, stanames, all=None, plot=None, summary=None, From=None, To=None, by=None,
how='mean', variables=None, distance=None, sort_cor=True, constant=True, cor_lim=None):
if all == True:
stations = self.network.getsta([], all=True).values()
else:
stations = self.network.getsta(stanames)
for station in stations:
staname = station.getpara('stanames')
if variables == None:
newdataframe = station.getData(reindex=True, From=From, To=To, by=by,
                                               how=how)  # DataFrame that stores the new data of the station
newdataframe['U m/s'] = station.getData('U m/s', reindex=True, From=From, To=To, by=by, how=how)
newdataframe['V m/s'] = station.getData('V m/s', reindex=True, From=From, To=To, by=by, how=how)
newdataframe['Ua g/kg'] = station.getData('Ua g/kg', reindex=True, From=From, To=To, by=by, how=how)
newdataframe['Theta C'] = station.getData('Theta C', reindex=True, From=From, To=To, by=by, how=how)
variables_name = newdataframe.columns
else:
newdataframe = station.getData(var=variables, reindex=True, From=From, To=To, by=by,
                                               how=how)  # DataFrame that stores the new data of the station
variables_name = variables
# select and sort nearest stations
selections, selectionsnames = self.__getpredictors_distance(staname, distance)
for var in variables_name:
print("I" * 30)
print("variable -> " + var)
try:
selections, params = self.__sort_predictors_by_corr(station, selections, var, From, To, by, how,
constant=constant,
selectionsnames=selectionsnames,
sort_cor=sort_cor, cor_lim=cor_lim)
selections_iter = iter(selections)
params_iter = iter(params)
# print newdataframe
                    idxmissing = newdataframe[var][
                        newdataframe[var].isnull() == True].index  # select where there is missing data
while len(idxmissing) > 0:
print("Their is [" + str(len(idxmissing)) + "] events missing")
try: # Try if their is still other stations to fill with
selection = selections_iter.next()
param = params_iter.next()
except StopIteration:
print("NO MORE SELECTED STATIONS")
break
try:
Y = station.getData(var, From=From, To=To, by=by, how=how) # variable to be filled
X1 = selection[0].getData(var, From=From, To=To, by=by,
how=how) # stations variable used to fill
X2 = selection[1].getData(var, From=From, To=To, by=by,
how=how) # stations variable used to fill
select = pd.concat([X1, X2], keys=['X1', 'X2'], axis=1, join='inner').dropna()
if constant:
newdata = param[0] + param[1] * select['X1'] + param[2] * select[
'X2'] # reconstruct the data
else:
newdata = param[0] * select['X1'] + param[1] * select['X2'] # reconstruct the data
newdataframe.loc[idxmissing, var] = newdata.loc[idxmissing, var]
                            idxmissing = newdataframe[var][
                                newdataframe[var].isnull() == True].index  # select where there is missing data
except KeyError:
print("&" * 60)
print('Selected stations did not fill any events')
except ValueError:
                    print('The variable ' + var + " does not exist or there is no data to do the multilinear regression")
if plot == True:
df = pd.concat([Y, X1, X2, newdata, newdataframe[var]],
keys=['Y', 'X1', 'X2', 'estimated data', 'Estimated replaced'], axis=1,
join='outer')
self.plotcomparison(df)
print("Their is [" + str(len(idxmissing)) + "] FINALLY events missing")
            # Recalculate the wind direction and speed from the U and V components
try:
speed, dir = cart2pol(newdataframe['U m/s'], newdataframe['V m/s'])
newdataframe['Dm G'] = dir
newdataframe['Sm m/s'] = speed
            except ValueError:
                print('No wind found in the dataframe')
except KeyError:
print('No wind found in the dataframe')
self.newdataframes[staname] = newdataframe
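The wind recomputation at the end relies on a cart2pol helper that is not shown in this snippet. A minimal stand-in, assuming U/V are the eastward/northward wind components and that the helper returns speed plus the meteorological direction (degrees the wind blows from); the exact convention of the original helper is an assumption:

import numpy as np

def cart2pol(u, v):
    # Hypothetical stand-in for the helper used above; works element-wise on pandas Series.
    speed = np.sqrt(u ** 2 + v ** 2)
    direction = (270.0 - np.degrees(np.arctan2(v, u))) % 360.0  # met. convention: direction the wind comes from
    return speed, direction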
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def test_nearest(self):\n dist = station.nearest(28.43, -81.31)\n stn = dist.pop(\"station\")\n self.assertIsInstance(stn, station.Station)\n self.assertEqual(stn.icao, \"KMCO\")\n for val in dist.values():\n self.assertIsInstance(val, float)\n for *params, count in (\n (30, -82, 10, True, True, 0.2, 1),\n (30, -82, 10, True, False, 0.2, 5),\n (30, -82, 10, False, False, 0.2, 6),\n (30, -82, 1000, True, True, 0.5, 6),\n (30, -82, 1000, False, False, 0.5, 37),\n ):\n stations = station.nearest(*params)\n self.assertEqual(len(stations), count)\n for dist in stations:\n stn = dist.pop(\"station\")\n self.assertIsInstance(stn, station.Station)\n for val in dist.values():\n self.assertIsInstance(val, float)",
"def test_nearest_filter(self):\n for airport, reports, count in (\n (True, True, 6),\n (True, False, 16),\n (False, True, 6),\n (False, False, 30),\n ):\n stations = station.nearest(30, -80, 30, airport, reports, 1.5)\n self.assertEqual(len(stations), count)",
"def test_nearest(self):\n for lat, lon, icao in ((28.43, -81.31, \"KMCO\"), (28.43, -81, \"KTIX\")):\n stn, dist = station.Station.nearest(lat, lon, is_airport=True)\n self.assertIsInstance(stn, station.Station)\n self.assertEqual(stn.icao, icao)\n for val in dist.values():\n self.assertIsInstance(val, float)\n # Test with IATA req disabled\n stn, dist = station.Station.nearest(28.43, -81, False, False)\n self.assertIsInstance(stn, station.Station)\n self.assertEqual(stn.icao, \"FA18\")\n for val in dist.values():\n self.assertIsInstance(val, float)",
"def Find_nearest_dwd_stations(inpt_data,\r\n date_start='20051201',\r\n date_end='20201231',\r\n dwd_time_format='%Y%m%d%H',\r\n data_category='air_temperature',\r\n temp_resolution='hourly',\r\n no_of_nearest_stations=4,\r\n memory_save=True,\r\n Output='True'):\r\n if isinstance(data_category,list):\r\n if len(list(data_category)) > 1:\r\n print(\r\n 'Currently only one dwd category allowed, please run function multiple times for each category'\r\n )\r\n return None\r\n \r\n #convert time to datetime\r\n dt_start=datetime.strptime(date_start,'%Y%m%d')\r\n dt_end=datetime.strptime(date_end,'%Y%m%d')\r\n print('Start quering data from DWD')\r\n #define the database folder\r\n pypath = os.path.dirname(os.path.abspath(__file__))\r\n table_dir = pypath + '\\\\' + 'tables'\r\n dbase_dir = pypath + '\\\\' + 'dbase' \r\n #%% we check all available stations and create a valid list\r\n filename_stations=update_stationlist(time_res='hourly',dbase_dir=table_dir)\r\n stations_all=pd.read_csv(filename_stations, dtype={'STATIONS_ID': object})\r\n # delete all stations which do not cover the category\r\n dwd_stations=stations_all[stations_all[data_category]==True].copy()\r\n #correct to datetime\r\n dwd_stations['date_end']=pd.to_datetime(stations_all.date_end,format='%Y%m%d')\r\n dwd_stations['date_start']=pd.to_datetime(stations_all.date_start,format='%Y%m%d')\r\n # clean to stations which cover the campaign time #dt_low <= dt <= dt_high:\r\n dwd_stations=dwd_stations[(dwd_stations.date_start<=dt_start) & (dwd_stations.date_end>=dt_end)]\r\n #make a geodataframe out of it\r\n dwd_stations=gpd.GeoDataFrame(dwd_stations,geometry=gpd.points_from_xy(dwd_stations.geo_lon, dwd_stations.geo_lat))\r\n \r\n #loop through all rows to get the n closest points\r\n distances=pd.DataFrame()\r\n for _, station in dwd_stations.iterrows():\r\n distances[station.STATIONS_ID]=inpt_data.distance(station.geometry)\r\n \r\n #%% get the n stations with smallest distance and update database\r\n id_nearest_stations=distances.apply(lambda s: s.nsmallest(no_of_nearest_stations).index.tolist(), axis=1).values.tolist() #station ids\r\n #get them as unique values by sum a list of lists https://bit.ly/353iZQB\r\n id_dwd_stations=list(set(sum(id_nearest_stations,[])))\r\n \r\n #update the database\r\n db_dwd_stations=import_stations(time_res=temp_resolution,time_format=dwd_time_format,campaign_time=[dt_start,dt_end],data_category=data_category,station_ids=id_dwd_stations,dbase_dir=dbase_dir,Output=Output,table_dir=table_dir,memory_save=memory_save)\r\n \r\n #distance of nearest stattions\r\n dist_nearest_stations=pd.DataFrame(np.sort(distances.values)[:,:no_of_nearest_stations]).values.tolist() #distances themself\r\n #create new columns in the input data\r\n station_col_nm=list()\r\n for i in range(0,no_of_nearest_stations):\r\n station_col_nm.append(data_category+'_station_'+str(i))\r\n for i in range(0,no_of_nearest_stations):\r\n station_col_nm.append(data_category+'_distance_'+str(i))\r\n #create new dataframe\r\n distance_data=pd.concat([pd.DataFrame(id_nearest_stations).astype(int),pd.DataFrame(dist_nearest_stations)],axis=1)\r\n distance_data.columns=station_col_nm\r\n #add to main dataset\r\n inpt_data=pd.concat([inpt_data, distance_data],axis=1) \r\n \r\n return inpt_data,db_dwd_stations",
"def interpolate(self, var, time, lat, lon):\n\n # Get the nearest four points in space\n # Check to see if lat/lons are 2d or 1d\n if len(self['lat'].shape) == 2:\n closey, closex, distances = self.nearest_points(lat, lon, npt=4)\n # Distances in km\n# distances = np.array([self.haversine(\n# (self['lat'][y,x].values, self['lon'][y,x].values),\n# (lat, lon)) for y,x in \n# zip(list(closey), list(closex))])\n else:\n closen = self.nearest_points(lat, lon, npt=4)\n closey = closen\n closex = closen\n # Distances in km\n distances = np.array([self.haversine(\n (self['lat'][n].values, self['lon'][n].values),\n (lat, lon)) for n in list(closen)])\n # Check for exact match (within some tolerance)\n spaceweights = np.zeros(distances.shape)\n if (distances < 1.0).sum() > 0:\n spaceweights[distances.argmin()] = 1\n else:\n # Here, inverse distance weighting (for simplicity)\n spaceweights = 1.0 / distances\n spaceweights /= spaceweights.sum()\n # Get weights in time\n #time64 = np.datetime64(time)\n #all the valid times in the ensemble\n valids = self['validtime'].values\n timeweights = np.zeros(valids.shape)\n # Check if we are outside the valid time range\n if (time < valids[0]) or (time > valids[-1]):\n print(\"Interpolation is outside of time range in state!\")\n return None\n # Find where we are in this list\n #index after the time of the observation\n lastdex = (valids >= time).argmax()\n # If we match a particular time value, then\n # this is just an identity\n if valids[lastdex] == time:\n # Just make a one at this time\n timeweights[lastdex] = 1\n else:\n # Linear interpolation\n #often going to be 6 hours, subtracts datetime objects I think\n diff = (valids[lastdex] - valids[lastdex-1])\n #print(valids[lastdex], valids[lastdex-1], diff)\n #often going to be 21600 seconds\n totsec = diff.seconds\n #totsec = np.abs(diff / np.timedelta64(1, 's'))\n #ST\n #calculate time difference between time after and time of observation\n #the abs will make this positive definite, which is okay since\n #the difference will always be negative\n thisdiff = abs(time - valids[lastdex])\n #thissec = np.abs(thisdiff / np.timedelta64(1,'s'))\n thissec = thisdiff.seconds\n # Put in appropriate weights\n #ST switched the -1 between the two lines to match up with the positive-\n #definite thisdiff\n timeweights[lastdex-1] = float(thissec) / totsec\n timeweights[lastdex] = 1.0 - (float(thissec)/totsec)\n # Now that we have the weights, do the interpolation\n #ST an ntimes x 4 x nens array\n interp = self.variables[var].values[:,closey,closex,:]\n # Do a dot product with the time weights\n # And with the space weights\n if len(interp.shape) == 3:\n interp = (timeweights[:,None,None] * interp).sum(axis=0)\n else:\n interp = (timeweights[:,None,None,None] * interp).sum(axis=0)\n \n if len(interp.shape) == 3:\n #ST Changed 2nd : to None\n interp = (spaceweights[:,None,None] * interp).sum(axis=1)\n else:\n interp = (spaceweights[:,None] * interp).sum(axis=0)\n # Return estimate from all ensemble members\n return interp",
"def run(self):\n # Cache parameters and arrays\n nstat = self.north.shape[1]\n ind = self.istart\n solver = self.solver\n cutoff = self.cutoff\n shared = self.shared\n\n # Check if penalties are arrays\n arrflag = [isinstance(arr, np.ndarray) for arr in [self.penn,self.pene,self.penu]]\n arrflag = reduce(operator.mul, arrflag, 1)\n\n # Loop over my portion of GPS stations\n for jj in range(nstat):\n # Unpack component-wise indices of valid observations\n bool_east, bool_north, bool_up = self.bool_list[jj]\n # Extract valid observations\n dnorth, deast, dup = (self.north[bool_north,jj], \n self.east[bool_east,jj], \n self.up[bool_up,jj])\n wn, we, wu = (self.wn[bool_north,jj], \n self.we[bool_east,jj], \n self.wu[bool_up,jj])\n Gn, Ge, Gu = self.G[bool_north,:], self.G[bool_east,:], self.G[bool_up,:]\n # Perform estimation and store weights\n if arrflag:\n northPen, eastPen, upPen = self.penn[jj,:], self.pene[jj,:], self.penu[jj,:]\n else:\n northPen, eastPen, upPen = self.penn, self.pene, self.penu\n shared.m_north[:,ind], qn = solver.invert(dmultl(wn,Gn), wn*dnorth, northPen)\n shared.m_east[:,ind], qe = solver.invert(dmultl(we,Ge), we*deast, eastPen)\n shared.m_up[:,ind], qu = solver.invert(dmultl(wu,Gu), wu*dup, upPen)\n # Now modify the shared penalty array\n if arrflag:\n shared.penn[ind,:] = qn[cutoff:]\n shared.pene[ind,:] = qe[cutoff:]\n shared.penu[ind,:] = qu[cutoff:]\n ind += 1\n\n # done\n return",
"def test_get_closest_stations(self):\n\t\tpoint = \"POINT(40.71911552 -74.00666661)\"\n\t\tstations = set(server.get_closest_stations(point))\n\t\t# find the closest stations, make them a set of objects see if sets intersect completely",
"def multi_velo_inspec(self, n = 60, lat0 = 60, lat1 = 90, pole = \"north\"):\n inds = self.mlat_finder(lat1, lat0, pole)[1]\n NeA = self.NeA[inds]\n NeB = self.NeB[inds]\n NeC = self.NeC[inds]\n\n secondsA = self.secondsA[inds]\n secondsB = self.secondsB[inds]\n secondsC = self.secondsC[inds]\n\n\n mlatA = self.mlatA[inds]\n mlatB = self.mlatB[inds]\n mlatC = self.mlatC[inds]\n\n mean_range = 5\n NeA = self.meanie(NeA, mean_range)\n NeB = self.meanie(NeB, mean_range)\n NeC = self.meanie(NeC, mean_range)\n \n \n N = int((len(NeA)/n*2) - 1) #nr of windows\n \n dx = (secondsB[1]-secondsB[0])*self.velB[0]\n \n nBAs = []\n nBCs = []\n nACs = []\n \n for i in range(N):\n startind = int(i/2*n)\n stopind = int((i/2+1)*n)\n temp_NeA = NeA[startind:stopind]\n temp_NeB = NeB[startind:stopind]\n temp_NeC = NeC[startind:stopind]\n \n temp_secondsA = secondsA[startind:stopind]\n temp_secondsB = secondsB[startind:stopind]\n temp_secondsC = secondsC[startind:stopind]\n \n \n curr_timediff = np.round((temp_secondsB[1:] - temp_secondsB[:-1])-(1/self.fs))\n if np.sum(curr_timediff) > 2:\n continue\n \n gradA = (temp_NeA[1:] - temp_NeA[:-1])/dx\n gradB = (temp_NeB[1:] - temp_NeB[:-1])/dx\n gradC = (temp_NeC[1:] - temp_NeC[:-1])/dx\n \n if np.max(gradA) < 0.9:\n continue\n \n stdA = np.std(gradA)\n stdB = np.std(gradB)\n stdC = np.std(gradC)\n \n meanA = temp_secondsB[np.where(gradA == np.max(gradA))][0]\n meanB = temp_secondsB[np.where(gradB == np.max(gradB))][0]\n meanC = temp_secondsB[np.where(gradC == np.max(gradC))][0]\n \n p0A = [1, meanA, stdA]\n p0B = [1, meanB, stdB]\n p0C = [1, meanB, stdB]\n \n poptA, pcovA = curve_fit(self.gaussian, temp_secondsB[:-1], gradA, p0 = p0A)\n poptB, pcovB = curve_fit(self.gaussian, temp_secondsB[:-1], gradB, p0 = p0B)\n poptC, pcovC = curve_fit(self.gaussian, temp_secondsB[:-1], gradC, p0 = p0C)\n \n nBA = poptB[1] - poptA[1]\n nBC = poptB[1] - poptC[1]\n nAC = poptA[1] - poptC[1]\n \n nBAs.append(nBA)\n nBCs.append(nBC)\n nACs.append(nAC)\n \n \n sBA = self.BA_shift/2 #time delay BA\n sBC = self.BC_shift/2 #time delay BC\n sAC = (self.BC_shift - self.BA_shift)/2\n V = self.velA[0]\n for i in range(len(nBAs)):\n VBA = self.along_track_velo(V, sBA, nBAs[i])\n VBC = self.along_track_velo(V, sBC, nBCs[i])\n VAC = self.along_track_velo(V, sAC, nACs[i])\n \n print(VBA)\n print(VBC)\n print(VAC)\n print(\"________________________________________\")",
"def check_latlon(self):\n\n for station in list(self.station_list.values()):\n station_def = self.station_definitions[station.name]\n lat = float(station.get_obs('LAT')[0])\n lon = float(station.get_obs('LON')[0])\n lat_diff = abs(lat - station_def['lat'])\n lon_diff = abs(lon - station_def['lon'])\n if lat_diff > .1:\n qc_error.all_qc_errors.append(\n qce(\n station_name=station.name,\n error_code=9000,\n old_data_value=lat,\n explanation=\"lats are different for: \" + station.name +\n \". Old value : \" + str(station_def['lat'])\n ))\n if lon_diff > .1:\n qc_error.all_qc_errors.append(\n qce(\n station_name=station.name,\n error_code=9000,\n old_data_value=lon,\n explanation=\"lons are different for: \" + station.name +\n \". Old value : \" + str(station_def['lon'])\n ))",
"def gpt2_1w (station, dmjd,dlat,dlon,hell,it):\n\n# need to find diffpod and difflon\n if (dlon < 0):\n plon = (dlon + 2*np.pi)*180/np.pi;\n else:\n plon = dlon*180/np.pi;\n# transform to polar distance in degrees\n ppod = (-dlat + np.pi/2)*180/np.pi; \n\n# % find the index (line in the grid file) of the nearest point\n# \t % changed for the 1 degree grid (GP)\n ipod = np.floor(ppod+1); \n ilon = np.floor(plon+1);\n \n# normalized (to one) differences, can be positive or negative\n#\t% changed for the 1 degree grid (GP)\n diffpod = (ppod - (ipod - 0.5));\n difflon = (plon - (ilon - 0.5));\n\n\n# change the reference epoch to January 1 2000\n print('Modified Julian Day', dmjd)\n dmjd1 = dmjd-51544.5 \n\n pi2 = 2*np.pi\n pi4 = 4*np.pi\n\n# mean gravity in m/s**2\n gm = 9.80665;\n# molar mass of dry air in kg/mol\n dMtr = 28.965E-3 \n# dMtr = 28.965*10^-3 \n# universal gas constant in J/K/mol\n Rg = 8.3143 \n\n# factors for amplitudes, i.e. whether you want time varying\n if (it==1):\n print('>>>> no refraction time variation ')\n cosfy = 0; coshy = 0; sinfy = 0; sinhy = 0;\n else: \n cosfy = np.cos(pi2*dmjd1/365.25)\n coshy = np.cos(pi4*dmjd1/365.25) \n sinfy = np.sin(pi2*dmjd1/365.25) \n sinhy = np.sin(pi4*dmjd1/365.25) \n cossin = np.matrix([1, cosfy, sinfy, coshy, sinhy])\n# initialization of new vectors\n p = 0; T = 0; dT = 0; Tm = 0; e = 0; ah = 0; aw = 0; la = 0; undu = 0;\n undul = np.zeros(4)\n Ql = np.zeros(4)\n dTl = np.zeros(4)\n Tl = np.zeros(4)\n pl = np.zeros(4)\n ahl = np.zeros(4)\n awl = np.zeros(4)\n lal = np.zeros(4)\n Tml = np.zeros(4)\n el = np.zeros(4)\n#\n pgrid, Tgrid, Qgrid, dTgrid, u, Hs, ahgrid, awgrid, lagrid, Tmgrid = read_4by5(station,dlat,dlon,hell)\n#\n for l in [0,1,2,3]:\n KL = l #silly to have this as a variable like this \n# transforming ellipsoidal height to orthometric height:\n# Hortho = -N + Hell\n undul[l] = u[KL] \n hgt = hell-undul[l] \n# pressure, temperature at the height of the grid\n T0 = Tgrid[KL,0] + Tgrid[KL,1]*cosfy + Tgrid[KL,2]*sinfy + Tgrid[KL,3]*coshy + Tgrid[KL,4]*sinhy;\n tg = float(Tgrid[KL,:] *cossin.T)\n# print(T0,tg)\n\n p0 = pgrid[KL,0] + pgrid[KL,1]*cosfy + pgrid[KL,2]*sinfy + pgrid[KL,3]*coshy + pgrid[KL,4]*sinhy;\n \n# humidity \n Ql[l] = Qgrid[KL,0] + Qgrid[KL,1]*cosfy + Qgrid[KL,2]*sinfy + Qgrid[KL,3]*coshy + Qgrid[KL,4]*sinhy;\n \n# reduction = stationheight - gridheight\n Hs1 = Hs[KL]\n redh = hgt - Hs1;\n\n# lapse rate of the temperature in degree / m\n dTl[l] = dTgrid[KL,0] + dTgrid[KL,1]*cosfy + dTgrid[KL,2]*sinfy + dTgrid[KL,3]*coshy + dTgrid[KL,4]*sinhy;\n \n# temperature reduction to station height\n Tl[l] = T0 + dTl[l]*redh - 273.15;\n\n# virtual temperature\n Tv = T0*(1+0.6077*Ql[l]) \n c = gm*dMtr/(Rg*Tv) \n \n# pressure in hPa\n pl[l] = (p0*np.exp(-c*redh))/100 \n \n# hydrostatic coefficient ah\n ahl[l] = ahgrid[KL,0] + ahgrid[KL,1]*cosfy + ahgrid[KL,2]*sinfy + ahgrid[KL,3]*coshy + ahgrid[KL,4]*sinhy;\n \n# wet coefficient aw\n awl[l] = awgrid[KL,0] + awgrid[KL,1]*cosfy + awgrid[KL,2]*sinfy + awgrid[KL,3]*coshy + awgrid[KL,4]*sinhy;\n\t\t\t\t\t \n# water vapor decrease factor la - added by GP\n lal[l] = lagrid[KL,0] + lagrid[KL,1]*cosfy + lagrid[KL,2]*sinfy + lagrid[KL,3]*coshy + lagrid[KL,4]*sinhy;\n\t\t\t\t\t \n# mean temperature of the water vapor Tm - added by GP\n Tml[l] = Tmgrid[KL,0] + Tmgrid[KL,1]*cosfy + Tmgrid[KL,2]*sinfy + Tmgrid[KL,3]*coshy + Tmgrid[KL,4]*sinhy;\n\t\t\t\t\t \t\t \n# water vapor pressure in hPa - changed by GP\n e0 = Ql[l]*p0/(0.622+0.378*Ql[l])/100; # % on the grid\n aa = 
(100*pl[l]/p0)\n bb = lal[l]+1\n el[l] = e0*np.power(aa,bb) # % on the station height - (14) Askne and Nordius, 1987\n \n dnpod1 = np.abs(diffpod); # % distance nearer point\n dnpod2 = 1 - dnpod1; # % distance to distant point\n dnlon1 = np.abs(difflon);\n dnlon2 = 1 - dnlon1;\n \n# pressure\n R1 = dnpod2*pl[0]+dnpod1*pl[1];\n R2 = dnpod2*pl[2]+dnpod1*pl[3];\n p = dnlon2*R1+dnlon1*R2;\n \n# temperature\n R1 = dnpod2*Tl[0]+dnpod1*Tl[1];\n R2 = dnpod2*Tl[2]+dnpod1*Tl[3];\n T = dnlon2*R1+dnlon1*R2;\n \n# temperature in degree per km\n R1 = dnpod2*dTl[0]+dnpod1*dTl[1];\n R2 = dnpod2*dTl[2]+dnpod1*dTl[3];\n dT = (dnlon2*R1+dnlon1*R2)*1000;\n \n# water vapor pressure in hPa - changed by GP\n R1 = dnpod2*el[0]+dnpod1*el[1];\n R2 = dnpod2*el[2]+dnpod1*el[3];\n e = dnlon2*R1+dnlon1*R2;\n \n# hydrostatic\n R1 = dnpod2*ahl[0]+dnpod1*ahl[1];\n R2 = dnpod2*ahl[2]+dnpod1*ahl[3];\n ah = dnlon2*R1+dnlon1*R2;\n \n# wet\n R1 = dnpod2*awl[0]+dnpod1*awl[1];\n R2 = dnpod2*awl[2]+dnpod1*awl[3];\n aw = dnlon2*R1+dnlon1*R2;\n \n# undulation\n R1 = dnpod2*undul[0]+dnpod1*undul[1];\n R2 = dnpod2*undul[2]+dnpod1*undul[3];\n undu = dnlon2*R1+dnlon1*R2;\n\n# water vapor decrease factor la - added by GP\n R1 = dnpod2*lal[0]+dnpod1*lal[1];\n R2 = dnpod2*lal[2]+dnpod1*lal[3];\n la = dnlon2*R1+dnlon1*R2;\n\t\t\n# mean temperature of the water vapor Tm - added by GP\n R1 = dnpod2*Tml[0]+dnpod1*Tml[1];\n R2 = dnpod2*Tml[2]+dnpod1*Tml[3];\n Tm = dnlon2*R1+dnlon1*R2; \n\n return p, T, dT,Tm,e,ah,aw,la,undu",
"def find_endpoints(batch_trajectories):\n # empty lists to fill\n site_lats = []\n site_lons = []\n last_lats = []\n last_lons = []\n lats_150 = []\n lons_150 = [] \n last_times = []\n times_150 = []\n last_sst = []\n sst_150 = []\n \n # temporary lists as placeholders\n temp_site_lats = []\n temp_site_lons = []\n temp_lats = []\n temp_lons = []\n temp_lats150 = []\n temp_lons150 = []\n temp_times = []\n temp_times150 = []\n temp_sst = []\n temp_sst150 = []\n\n for speed in range(len(batch_trajectories)):\n # working with one speed at a time means working with one nc file at\n # a time\n \n # reset temporary lists\n temp_site_lats = []\n temp_site_lons = []\n temp_lats = []\n temp_lons = []\n temp_lats150 = []\n temp_lons150 = []\n temp_times = []\n temp_times150 = []\n temp_sst = []\n temp_sst150 = []\n\n # extract variables into lists\n lats = batch_trajectories[speed].variables['lat'][:]\n lons = batch_trajectories[speed].variables['lon'][:]\n lats150 = batch_trajectories[speed].variables['lat150'][:]\n lons150 = batch_trajectories[speed].variables['lon150'][:]\n times = batch_trajectories[speed].variables['time'][:]\n ssts = batch_trajectories[speed].variables['temp'][:]\n ssts_150 = batch_trajectories[speed].variables['temp150'][:]\n\n # if a particle is deleted before time is up, values are masked. \n # We'd like to get the last valid number.\n for trajectory in range(len(lats)):\n i = -1 # index for the last value\n while np.ma.is_masked(lats[trajectory][i]) is True:\n i -= 1 # if the value is masked, go to one value sooner\n \n j = i # use j for the 150m values\n while lats150[trajectory][j] > 0:\n # we want the first index where the latitude is recorded.\n # j is actually the last one where it's not recorded, so we\n # extract the information at index j+1\n j -= 1\n\n # once i and j are determined for a trajectory, we can extract the\n # variables and append them to temporary lists.\n temp_site_lats.append(lats[trajectory][0])\n temp_site_lons.append(lons[trajectory][0])\n temp_lats.append(lats[trajectory][i])\n temp_lons.append(lons[trajectory][i])\n temp_lats150.append(lats150[trajectory][j+1])\n temp_lons150.append(lons150[trajectory][j+1])\n temp_times.append(times[trajectory][i])\n temp_sst.append(ssts[trajectory][i])\n temp_sst150.append(ssts_150[trajectory][j+1])\n temp_times150.append(times[trajectory][j+1])\n \n # after the temporary lists are appended by sinking speed, they\n # are appended to the big lists that are returned by the function.\n # this keeps the structure of being separated by sinking speed.\n site_lats.append(temp_site_lats)\n site_lons.append(temp_site_lons)\n last_lats.append(temp_lats)\n last_lons.append(temp_lons)\n lats_150.append(temp_lats150)\n lons_150.append(temp_lons150)\n last_times.append(temp_times)\n times_150.append(temp_times150)\n last_sst.append(temp_sst)\n sst_150.append(temp_sst150)\n \n return site_lats, site_lons, last_lats, last_lons, lats_150, lons_150,\\\n last_times, times_150, last_sst, sst_150",
"def get_neigh_demand(city):\n\n # get station set S with more than 10 charge equipment\n static_file_path = exp_data_path + os.sep + 'static' + os.sep + 'static_feature_{}.csv'.format(city)\n static_feature = pd.read_csv(static_file_path, header=0)\n station_set = set(static_feature[static_feature.num >= 10].index)\n\n # calculate 10 nearest neighborhoods for each station, sort by distance and store their index, get a map\n neighbor_distance_map = {}\n matrix_distance = np.load(exp_data_path + os.sep + 'similarity' + os.sep + 'similarity_distance_{}_numpy.npy'.format(city), allow_pickle=True)\n all_distance_map = {i: [] for i in range(station_count[city])}\n for i in range(station_count[city]):\n if i not in station_set:\n continue\n for j in range(station_count[city]):\n if j not in station_set:\n continue\n all_distance_map[i].append((j, matrix_distance[i][j]))\n all_distance_map[i].sort(key=lambda x : x[1], reverse=True)\n neighbor_distance_map[i] = [idx for idx, distance in all_distance_map[i][:10]]\n\n # 11 times header, get static neighborhood feature for each station(in S), get csv: neighbor_feature_{city}.csv\n ALL_HEADER = ['index']\n ALL_HEADER.extend(GENERAL_HEADER)\n for i in range(10):\n for j in GENERAL_HEADER:\n ALL_HEADER.append('{}_{}'.format(j, i))\n\n raw_data = np.empty((len(neighbor_distance_map), len(ALL_HEADER)))\n for i, idx in enumerate(neighbor_distance_map.keys()):\n raw_data[i][0] = idx\n raw_data[i][1:1+len(GENERAL_HEADER)] = static_feature.iloc[idx]['num':'mall']\n for j in range(10):\n neighbor_idx = neighbor_distance_map[idx][j]\n raw_data[i][1+len(GENERAL_HEADER)*(j+1):1+len(GENERAL_HEADER)*(j+2)] = static_feature.iloc[neighbor_idx]['num':'mall']\n neighbor_feature_data = pd.DataFrame(raw_data, columns=ALL_HEADER)\n print('neighbor feature')\n print(neighbor_feature_data)\n\n neighbor_feature_path = exp_data_path + os.sep + 'static' + os.sep + 'static_neighor_feature_{}.csv'.format(city)\n if os.path.exists(neighbor_feature_path):\n os.remove(neighbor_feature_path)\n neighbor_feature_data.to_csv(neighbor_feature_path)\n\n # create final csv(11 times header with basic info(time_index + time_embed_index))\n # if index in S, fill basic info, neighbor_feature and demand\n\n demand = np.load(exp_data_path + os.sep + 'station' + os.sep + 'demand_{}.npy'.format(city), allow_pickle=True)\n time_count = demand.shape[1]\n\n DEMAND_HEADER = []\n DEMAND_HEADER.extend(ALL_HEADER)\n DEMAND_HEADER.extend(['time_index', 'time_embed', 'demand'])\n neighbor_demand_raw_data = np.empty(((len(neighbor_distance_map)*time_count, len(DEMAND_HEADER))))\n\n # get time map like {\"0800\": 1, \"0830\": 2, ....}\n time_index_map = np.load(exp_data_path + os.sep + 'station_list' + os.sep + 'time_index.npy', allow_pickle=True)\n time_index_map = dict(time_index_map.tolist())\n time_map = {t: i for i, t in enumerate(sorted(set([k[-4:] for k in time_index_map['rev_index'].keys()])))}\n\n cur_idx = 0\n for time_idx in range(time_count):\n time_embed_idx = time_map[time_index_map['index'][time_idx][-4:]]\n for station_idx in station_set:\n neighbor_demand_raw_data[cur_idx][0:len(ALL_HEADER)] = neighbor_feature_data.loc[neighbor_feature_data['index']==station_idx, 'index':'mall_9']\n neighbor_demand_raw_data[cur_idx][len(ALL_HEADER)] = time_idx\n neighbor_demand_raw_data[cur_idx][len(ALL_HEADER)+1] = time_embed_idx\n neighbor_demand_raw_data[cur_idx][len(ALL_HEADER)+2] = demand[station_idx][time_idx][-1]\n # todo add slow demand and quick demand here\n cur_idx = cur_idx + 1\n print(cur_idx, 
neighbor_demand_raw_data.shape)\n\n neighbor_demand_data = pd.DataFrame(neighbor_demand_raw_data, columns=DEMAND_HEADER)\n print('neighbor demand')\n print(neighbor_demand_data)\n\n neighbor_demand_path = exp_data_path + os.sep + 'static' + os.sep + 'neighbor_demand_{}.csv'.format(city)\n if os.path.exists(neighbor_demand_path):\n os.remove(neighbor_demand_path)\n neighbor_demand_data.to_csv(neighbor_demand_path)",
"def Fetch_station(long, lat, y):\r\n global ddf\r\n dmin = 1000000\r\n rs = 0\r\n i=0\r\n for i in range(len(ddf[y])):\r\n #Calculate the distance between zip code location and weather station location\r\n dnew = Distance_orthonormique(ddf[y]['LON'][i], ddf[y]['LAT'][i], long, lat)\r\n\r\n if(dmin > dnew):\r\n #If the last smaller distance is superior than the current distance :\r\n #the new smaller distance is the current distance\r\n dmin = dnew\r\n rs = i\r\n\r\n #rs = index dataframe weather station\r\n #ddf[y]['STATION NAME'][rs] = Weather station name\r\n #round(dmin, 2) = Distance between weather station and zip code\r\n \r\n return rs, ddf[y]['STATION NAME'][rs], round(dmin,2)",
"def prepare_input(self, only_center = True):\n \n if only_center:\n nx = [0]\n ny = [0]\n else:\n nx = [0,1,-1]\n ny = [0,1,-1]\n gauge = dd.read_csv(str(Path(self.db_location, 'gauge', '*.csv.gz')), \n compression='gzip', \n assume_missing=True,\n dtype = {'TIMESTAMP':int, 'STATION': str})\n \n gauge = gauge.compute().drop_duplicates()\n gauge = gauge.replace(-9999,np.nan)\n for x in nx:\n for y in ny:\n logging.info('Processing neighbour {:d}{:d}'.format(x, y))\n radar = dd.read_parquet(str(Path(self.db_location, 'radar',\n '*.parquet')))\n refer = dd.read_parquet(str(Path(self.db_location, 'reference', \n '*.parquet')))\n \n # Select only required pixel\n radar = radar.loc[np.logical_and(radar['NX'] == x, \n radar['NY'] == y)]\n refer = refer.loc[np.logical_and(refer['NX'] == x, \n refer['NY'] == y)]\n \n # Convert to pandas and remove duplicates \n radar = radar.compute().drop_duplicates(subset = ['TIMESTAMP',\n 'STATION',\n 'RADAR',\n 'NX','NY',\n 'SWEEP'])\n \n refer = refer.compute().drop_duplicates(subset = ['TIMESTAMP',\n 'STATION'])\n \n radar = radar.sort_values(by = ['TIMESTAMP','STATION','SWEEP'])\n refer = refer.sort_values(by = ['TIMESTAMP','STATION'])\n gauge = gauge.sort_values(by = ['TIMESTAMP','STATION'])\n # Get only valid precip data\n gauge = gauge[np.isfinite(gauge['RRE150Z0'])]\n \n # Create individual 10 min - station stamps\n gauge['s-tstamp'] = np.array(gauge['STATION'] + \n gauge['TIMESTAMP'].astype(str)).astype(str)\n radar['s-tstamp'] = np.array(radar['STATION'] + \n radar['TIMESTAMP'].astype(str)).astype(str)\n refer['s-tstamp'] = np.array(refer['STATION'] + \n refer['TIMESTAMP'].astype(str)).astype(str)\n \n # Get gauge and reference only when radar data available\n \n # Find timestamps that are in the three datasets\n ststamp_common = np.array(pd.Series(list(set(gauge['s-tstamp'])\n .intersection(set(refer['s-tstamp'])))))\n ststamp_common = np.array(pd.Series(list(set(radar['s-tstamp'])\n .intersection(set(ststamp_common)))))\n radar = radar.loc[radar['s-tstamp'].isin(ststamp_common)]\n gauge = gauge.loc[gauge['s-tstamp'].isin(ststamp_common)]\n refer = refer.loc[refer['s-tstamp'].isin(ststamp_common)]\n \n \n # Filter incomplete hours\n stahour = np.array(gauge['STATION'] + \n ((gauge['TIMESTAMP'] - 600 ) - \n (gauge['TIMESTAMP'] - 600 ) % 3600).astype(str)).astype(str)\n \n full_hours = np.array(gauge.groupby(stahour)['STATION']\n .transform('count') == 6)\n \n refer = refer.reindex[full_hours]\n gauge = gauge.reindex[full_hours] \n radar = radar.reindex[radar['s-tstamp'].\n isin(np.array(gauge['s-tstamp']))]\n \n stahour = stahour[full_hours]\n \n # Creating vertical grouping index\n \n _, idx, grp_vertical = np.unique(radar['s-tstamp'],\n return_inverse = True,\n return_index = True)\n # Get original order\n sta_tstamp_unique = radar['s-tstamp'][np.sort(idx)]\n # Preserves order and avoids sorting radar_statstamp\n grp_vertical = idx[grp_vertical]\n # However one issue is that the indexes are not starting from zero with increment\n # of one, though they are sorted, they are like 0,7,7,7,15,15,23,23\n # We want them starting from zero with step of one\n grp_vertical = rankdata(grp_vertical,method='dense') - 1\n \n # Repeat operation with gauge hours\n sta_hourly_unique, idx, grp_hourly = np.unique(stahour, \n return_inverse = True,\n return_index = True)\n grp_hourly = idx[grp_hourly]\n \n # Add derived variables height iso0 (HISO) and height above ground (HAG)\n # Radar\n stations = constants.METSTATIONS\n cols = list(stations.columns)\n cols[1] = 
'STATION'\n stations.columns = cols\n radar = pd.merge(radar,stations, how = 'left', on = 'STATION',\n sort = False)\n \n radar['HISO'] = -radar['T'] / constants.LAPSE_RATE * 100\n radar['HAG'] = radar['HEIGHT'] - radar['Z']\n radar['HAG'][radar['HAG'] < 0] = 0\n \n # Gauge\n gauge['minutes'] = (gauge['TIMESTAMP'] % 3600)/60\n \n # Save all to file\n refer.to_parquet(str(Path(self.input_location, \n 'reference_x{:d}y{:d}.parquet'.format(x,y))),\n compression = 'gzip', index = False)\n \n radar.to_parquet(str(Path(self.input_location, \n 'radar_x{:d}y{:d}.parquet'.format(x,y))),\n compression = 'gzip', index = False)\n \n grp_idx = {}\n grp_idx['grp_vertical'] = grp_vertical\n grp_idx['grp_hourly'] = grp_hourly\n grp_idx['tstamp_unique'] = sta_tstamp_unique\n \n pickle.dump(grp_idx, \n open(str(Path(self.input_location, \n 'grouping_idx_x{:d}y{:d}.p'.format(x,y))),'wb'))\n \n if x == 0 and y == 0:\n # Save only gauge for center pixel since it's available only there\n gauge.to_parquet(str(Path(self.input_location, 'gauge.parquet')),\n compression = 'gzip', index = False)",
"def nearest_loop(row, gdf2,geometry_cols=['geo_lon','geo_lat'],src_column=None,surrounding=False):\r\n def haversine_distance(origin, destination):\r\n lon1, lat1 = origin\r\n lon2, lat2 = destination\r\n radius = 6371000 # meters\r\n \r\n dlat = math.radians(lat2-lat1)\r\n dlon = math.radians(lon2-lon1)\r\n a = math.sin(dlat/2) * math.sin(dlat/2) + math.cos(math.radians(lat1)) \\\r\n * math.cos(math.radians(lat2)) * math.sin(dlon/2) * math.sin(dlon/2)\r\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))\r\n d = radius * c\r\n return d\r\n\r\n # start the main iteration\r\n if row.geometry.type == 'Polygon':\r\n point_xy = np.array((row.geometry.centroid.x,\r\n row.geometry.centroid.y))\r\n if row.geometry.type in ['Point', 'LineString']:\r\n point_xy = np.array((row.geometry.x, row.geometry.y)) \r\n # Select most current stations datasets.\r\n closest = None\r\n closest_distance = 99999999999\r\n for _, station in gdf2.iterrows():\r\n d = haversine_distance((point_xy[0], point_xy[1]),\r\n (station[geometry_cols[0]], station[geometry_cols[1]]))\r\n if d < closest_distance:\r\n closest = station\r\n closest_distance = d\r\n # if surroung \r\n if surrounding:\r\n closest1 = []\r\n closest_distance = closest_distance+surrounding\r\n i = 0\r\n for _, station in gdf2.iterrows():\r\n d = haversine_distance((point_xy[0], point_xy[1]),\r\n (station[geometry_cols[0]], station[geometry_cols[1]]))\r\n if d < closest_distance:\r\n closest1.append(station)\r\n i += 1\r\n closest = closest1\r\n return closest[src_column]",
"def _get_storm_velocities_missing(\n storm_object_table,\n e_folding_radius_metres=DEFAULT_VELOCITY_EFOLD_RADIUS_METRES):\n\n east_velocities_m_s01 = storm_object_table[\n tracking_utils.EAST_VELOCITY_COLUMN].values\n\n north_velocities_m_s01 = storm_object_table[\n tracking_utils.NORTH_VELOCITY_COLUMN].values\n\n if not numpy.any(numpy.isnan(east_velocities_m_s01)):\n return storm_object_table\n\n unique_times_unix_sec, orig_to_unique_indices = numpy.unique(\n storm_object_table[tracking_utils.VALID_TIME_COLUMN].values,\n return_inverse=True)\n\n num_times = len(unique_times_unix_sec)\n\n # Use neighbouring storms at same time to estimate missing velocities.\n for j in range(num_times):\n these_indices = numpy.where(orig_to_unique_indices == j)[0]\n if not numpy.any(numpy.isnan(east_velocities_m_s01[these_indices])):\n continue\n\n (east_velocities_m_s01[these_indices],\n north_velocities_m_s01[these_indices]\n ) = _estimate_velocity_by_neigh(\n x_coords_metres=storm_object_table[\n CENTROID_X_COLUMN].values[these_indices],\n y_coords_metres=storm_object_table[\n CENTROID_Y_COLUMN].values[these_indices],\n x_velocities_m_s01=east_velocities_m_s01[these_indices],\n y_velocities_m_s01=north_velocities_m_s01[these_indices],\n e_folding_radius_metres=e_folding_radius_metres)\n\n if not numpy.any(numpy.isnan(east_velocities_m_s01)):\n return storm_object_table.assign(**{\n tracking_utils.EAST_VELOCITY_COLUMN: east_velocities_m_s01,\n tracking_utils.NORTH_VELOCITY_COLUMN: north_velocities_m_s01\n })\n\n # Use all storms at same time to estimate missing velocities.\n for j in range(num_times):\n these_indices = numpy.where(orig_to_unique_indices == j)[0]\n if not numpy.any(numpy.isnan(east_velocities_m_s01[these_indices])):\n continue\n\n (east_velocities_m_s01[these_indices],\n north_velocities_m_s01[these_indices]\n ) = _estimate_velocity_by_neigh(\n x_coords_metres=storm_object_table[\n CENTROID_X_COLUMN].values[these_indices],\n y_coords_metres=storm_object_table[\n CENTROID_Y_COLUMN].values[these_indices],\n x_velocities_m_s01=east_velocities_m_s01[these_indices],\n y_velocities_m_s01=north_velocities_m_s01[these_indices],\n e_folding_radius_metres=numpy.nan)\n\n if not numpy.any(numpy.isnan(east_velocities_m_s01)):\n return storm_object_table.assign(**{\n tracking_utils.EAST_VELOCITY_COLUMN: east_velocities_m_s01,\n tracking_utils.NORTH_VELOCITY_COLUMN: north_velocities_m_s01\n })\n\n # Use neighbouring storms at all times to estimate missing velocities.\n for j in range(num_times):\n these_indices = numpy.where(orig_to_unique_indices == j)[0]\n if not numpy.any(numpy.isnan(east_velocities_m_s01[these_indices])):\n continue\n\n these_east_velocities_m_s01, these_north_velocities_m_s01 = (\n _estimate_velocity_by_neigh(\n x_coords_metres=storm_object_table[CENTROID_X_COLUMN].values,\n y_coords_metres=storm_object_table[CENTROID_Y_COLUMN].values,\n x_velocities_m_s01=east_velocities_m_s01 + 0.,\n y_velocities_m_s01=north_velocities_m_s01 + 0.,\n e_folding_radius_metres=e_folding_radius_metres)\n )\n\n east_velocities_m_s01[these_indices] = these_east_velocities_m_s01[\n these_indices]\n north_velocities_m_s01[these_indices] = these_north_velocities_m_s01[\n these_indices]\n\n if not numpy.any(numpy.isnan(east_velocities_m_s01)):\n return storm_object_table.assign(**{\n tracking_utils.EAST_VELOCITY_COLUMN: east_velocities_m_s01,\n tracking_utils.NORTH_VELOCITY_COLUMN: north_velocities_m_s01\n })\n\n # Use all storms at all times to estimate missing velocities.\n for j in range(num_times):\n 
these_indices = numpy.where(orig_to_unique_indices == j)[0]\n if not numpy.any(numpy.isnan(east_velocities_m_s01[these_indices])):\n continue\n\n these_east_velocities_m_s01, these_north_velocities_m_s01 = (\n _estimate_velocity_by_neigh(\n x_coords_metres=storm_object_table[CENTROID_X_COLUMN].values,\n y_coords_metres=storm_object_table[CENTROID_Y_COLUMN].values,\n x_velocities_m_s01=east_velocities_m_s01 + 0.,\n y_velocities_m_s01=north_velocities_m_s01 + 0.,\n e_folding_radius_metres=numpy.nan)\n )\n\n east_velocities_m_s01[these_indices] = these_east_velocities_m_s01[\n these_indices]\n north_velocities_m_s01[these_indices] = these_north_velocities_m_s01[\n these_indices]\n\n if not numpy.any(numpy.isnan(east_velocities_m_s01)):\n return storm_object_table.assign(**{\n tracking_utils.EAST_VELOCITY_COLUMN: east_velocities_m_s01,\n tracking_utils.NORTH_VELOCITY_COLUMN: north_velocities_m_s01\n })\n\n # Replace missing velocities with defaults.\n nan_indices = numpy.where(numpy.isnan(east_velocities_m_s01))[0]\n east_velocities_m_s01[nan_indices] = DEFAULT_EAST_VELOCITY_M_S01\n north_velocities_m_s01[nan_indices] = DEFAULT_NORTH_VELOCITY_M_S01\n\n return storm_object_table.assign(**{\n tracking_utils.EAST_VELOCITY_COLUMN: east_velocities_m_s01,\n tracking_utils.NORTH_VELOCITY_COLUMN: north_velocities_m_s01\n })",
"def calculate_vars(data, lat, lon):\n # Keep track of running distance and time calculations\n distance_to_dest = 0.0\n time_estimate = 0.0\n\n # Calculate from starting dest to first point in data\n user_coords = (lat, lon)\n first_path_coords = (data[0][\"lat\"], data[0][\"lon\"])\n first_distance = geopy.distance.distance(user_coords, first_path_coords).miles\n distance_to_dest += first_distance\n time_estimate += first_distance * 20 # 3mph walking speed\n\n # Calculate for all other points\n for i in range(1, len(data) - 1):\n this_coords = (data[i][\"lat\"], data[i][\"lon\"])\n next_coords = (data[i + 1][\"lat\"], data[i + 1][\"lon\"])\n\n distance = geopy.distance.distance(this_coords, next_coords).miles\n distance_to_dest += distance\n time_estimate += distance * 20 # 3mph walking speed\n\n # Round distance and time estimates\n distance_to_dest = round(distance_to_dest, 1)\n time_estimate = round(time_estimate)\n\n return distance_to_dest, time_estimate",
"def test_stations_by_distance():\n station_list = build_station_list()\n #test for stations closest to cambridge city coordinates\n station_list_sort = stations_by_distance(station_list, (52.2053, 0.1218))\n output = [(station.name, distance) for (station, distance) in station_list_sort]\n for n in range(1, len(station_list)):\n #make sure that the distance of the previous station to the point is less than the next one in the list\n assert output[n-1][1] <= output[n][1]",
"def forecast_for_closest(\n lat: float, lon: float, lang=_DEFAULT_LANG, num_stations_to_try: int = 3\n) -> Tuple[Dict, Dict]:\n assert lang in _SUPPORTED_LANGS\n\n stations = closest_stations(lat, lon, limit=num_stations_to_try)\n for s in stations:\n o = forecast_for_station(s[\"id\"], lang=lang)\n if o[\"results\"] and not o[\"results\"][0].get(\"err\") and o[\"results\"][0][\"valid\"]:\n return o, s\n\n return forecast_for_station(stations[0][\"id\"], lang=lang), stations[0]",
"def equalise_ts(station_list):\n s_y={}\n s_y_size = 5000 #large number for first pass\n for s in station_list:\n s_data = np.loadtxt('station_fuelType/'+s+\"_P98\",delimiter=',')\n s_y[s] = s_data[:,1]\n while float(s_y[s][0]) == 0.0:\n s_y[s]=s_y[s][1:len(s_y[s])]\n if len(s_y[s])<s_y_size:\n s_y_size = len(s_y[s])\n \n for s in s_y:\n if len(s_y[s])>s_y_size:\n s_y[s]=s_y[s][len(s_y[s])-s_y_size:len(s_y[s])]\n \n ts_y = np.ndarray(shape=[0, s_y_size], dtype = 'float')\n for key, v in s_y.items():\n ts_y = np.vstack([ts_y, np.array(v)])\n\n return ts_y",
"def main():\n #get_lat_long\n place_name = 'Arlington - Arlington St'\n # sec_fun = get_lat_long(place_name)\n # print(sec_fun)\n # get_nearest_station(sec_fun[0], sec_fun[1]) #\n # get_nearest_station(42.350009, -71.076077)\n print(find_stop_near(place_name))",
"def process(date, lat_oi, lon_oi, shared_args, verbose=False):\n \n filename = download(date, shared_args)\n\n atmo_data = data.open_netcdf4(filename)\n\n # choose points\n lat = atmo_data.variables['lat'][:]\n lon = atmo_data.variables['lon'][:]\n lat = numpy.stack([lat]*lon.shape[0], axis=0)\n lon = numpy.stack([lon]*lat.shape[1], axis=1)\n chosen_idxs, data_coor = funcs.choose_points(lat, lon, lat_oi, lon_oi)\n\n latidx = tuple(chosen_idxs[0])\n lonidx = tuple(chosen_idxs[1])\n \n t1, t2 = data.closest_hours(atmo_data.variables['time'][:].data,\n atmo_data.variables['time'].units, date)\n t1_dt = num2date(atmo_data.variables['time'][t1], atmo_data.variables['time'].units)\n t2_dt = num2date(atmo_data.variables['time'][t2], atmo_data.variables['time'].units)\n\n index1 = (t1, slice(None), latidx, lonidx)\n index2 = (t2, slice(None), latidx, lonidx)\n\n press = numpy.array(atmo_data.variables['lev'][:])\n\n temp1 = numpy.empty\n temp2 = numpy.empty\n \n temp1 = numpy.diagonal(atmo_data.variables['T'][index1], axis1=1, axis2=2).T\n temp2 = numpy.diagonal(atmo_data.variables['T'][index2], axis1=1, axis2=2).T\n\n rhum1 = numpy.diagonal(atmo_data.variables['RH'][index1], axis1=1, axis2=2).T # relative humidity\n rhum2 = numpy.diagonal(atmo_data.variables['RH'][index2], axis1=1, axis2=2).T\n\n height1 = numpy.diagonal(atmo_data.variables['H'][index1], axis1=1, axis2=2).T / 1000.0 # height\n height2 = numpy.diagonal(atmo_data.variables['H'][index2], axis1=1, axis2=2).T / 1000.0\n\n # interpolate in time, now they are shape (4, N)\n t = interp.interp_time(date, temp1, temp2, t1_dt, t2_dt)\n h = interp.interp_time(date, height1, height2, t1_dt, t2_dt)\n rh = interp.interp_time(date, rhum1, rhum2, t1_dt, t2_dt)\n \n # interpolate in space, now they are shape (1, N)\n height = interp.idw(h, data_coor, [lat_oi, lon_oi])\n temp = interp.idw(t, data_coor, [lat_oi, lon_oi])\n relhum = interp.idw(rh, data_coor, [lat_oi, lon_oi])\n \n # calculate the number of nan and zero values in the array and remove them, reducing the size of the array accordingly\n nr_of_nans1 = numpy.sum(temp1[0].mask)\n nr_of_nans2 = numpy.sum(temp2[0].mask)\n nr_of_nans = max([nr_of_nans1,nr_of_nans2])\n \n height = height[nr_of_nans:]\n temp = temp[nr_of_nans:]\n relhum = relhum[nr_of_nans:]\n press = press[nr_of_nans:]\n\n # load standard atmosphere for mid-lat summer\n # TODO evaluate standard atmo validity, add different ones for different TOY?\n stan_atmo = numpy.loadtxt(settings.STAN_ATMO, unpack=True)\n stan_height, stan_press, stan_temp, stan_relhum = stan_atmo\n # add standard atmo above cutoff index\n \n cutoff_idx = numpy.abs(stan_press - press[-1]).argmin()\n height = numpy.append(height, stan_height[cutoff_idx:])\n press = numpy.append(press, stan_press[cutoff_idx:])\n temp = numpy.append(temp, stan_temp[cutoff_idx:])\n relhum = numpy.append(relhum, stan_relhum[cutoff_idx:])\n \n # Convert relative humidity to percentage for modtran\n relhum = relhum * 100\n\n # TODO add buoy stuff to bottom of atmosphere\n\n if verbose:\n # send out plots and stuff\n stuff = numpy.asarray([height, press, temp, relhum]).T\n h = 'Height [km], Pressure[kPa], Temperature[k], Relative_Humidity[0-100]' + '\\nCoordinates: {0} Buoy:{1}'.format(data_coor, buoy)\n \n numpy.savetxt('atmosphere_{0}_{1}_{2}.txt'.format('merra', date.strftime('%Y%m%d'), buoy.id), stuff, fmt='%7.2f, %7.2f, %7.2f, %7.2f', header=h)\n\n return height, press, temp, relhum",
"def _compute(self, w_beg, w_end, signal, station_availability):\n\n avail_idx = np.where(station_availability == 1)[0]\n sige = signal[0]\n sign = signal[1]\n sigz = signal[2]\n\n p_onset_raw, p_onset = self._compute_p_onset(sigz,\n self.sampling_rate)\n s_onset_raw, s_onset = self._compute_s_onset(sige, sign,\n self.sampling_rate)\n self.data.p_onset = p_onset\n self.data.s_onset = s_onset\n self.data.p_onset_raw = p_onset_raw\n self.data.s_onset_raw = s_onset_raw\n\n ps_onset = np.concatenate((self.data.p_onset, self.data.s_onset))\n ps_onset[np.isnan(ps_onset)] = 0\n\n p_ttime = self.lut.fetch_index(\"TIME_P\", self.sampling_rate)\n s_ttime = self.lut.fetch_index(\"TIME_S\", self.sampling_rate)\n ttime = np.c_[p_ttime, s_ttime]\n del p_ttime, s_ttime\n\n nchan, tsamp = ps_onset.shape\n\n pre_smp = int(round(self.pre_pad * int(self.sampling_rate)))\n pos_smp = int(round(self.post_pad * int(self.sampling_rate)))\n nsamp = tsamp - pre_smp - pos_smp\n\n # Prep empty 4-D coalescence map and run C-compiled ilib.migrate()\n ncell = tuple(self.lut.cell_count)\n map_4d = np.zeros(ncell + (nsamp,), dtype=np.float64)\n ilib.migrate(ps_onset, ttime, pre_smp, pos_smp, nsamp, map_4d,\n self.n_cores)\n\n # Prep empty coa and loc arrays and run C-compiled ilib.find_max_coa()\n max_coa = np.zeros(nsamp, np.double)\n grid_index = np.zeros(nsamp, np.int64)\n ilib.find_max_coa(map_4d, max_coa, grid_index, 0, nsamp, self.n_cores)\n\n # Get max_coa_norm\n sum_coa = np.sum(map_4d, axis=(0, 1, 2))\n max_coa_norm = max_coa / sum_coa\n max_coa_norm = max_coa_norm * map_4d.shape[0] * map_4d.shape[1] * \\\n map_4d.shape[2]\n\n tmp = np.arange(w_beg + self.pre_pad,\n w_end - self.post_pad + (1 / self.sampling_rate),\n 1 / self.sampling_rate)\n daten = [x.datetime for x in tmp]\n\n # Calculate max_coa (with correction for number of stations)\n max_coa = np.exp((max_coa / (len(avail_idx) * 2)) - 1.0)\n\n loc = self.lut.xyz2index(grid_index, inverse=True)\n\n return daten, max_coa, max_coa_norm, loc, map_4d",
"def run():\n\n # Build list of tuples of station names and distance \n stations = build_station_list()\n p = (52.2053, 0.1218)\n by_distance = stations_by_distance(stations, p)\n for n in range(10):\n print(by_distance[n])\n for n in range(10):\n i = len(by_distance) - 10 + n\n print(by_distance[i])",
"def update_stationlist(time_res='hourly',dbase_dir='dbase'):\r\n\r\n \r\n dwd_abbr = {'air_temperature': 'TU',\r\n 'cloud_type': 'CS', \r\n 'cloudiness': 'N',\r\n 'dew_point' : 'TD',\r\n 'extreme_temperature': 'TX',\r\n 'extreme_wind': 'FX',\r\n 'precipitation': 'RR',\r\n 'pressure': 'P0',\r\n 'soil_temperature': 'EB',\r\n 'solar': 'ST',\r\n 'sun': 'SD',\r\n 'visibility': 'VV',\r\n 'wind': 'FF',\r\n 'wind_synop': 'F'\r\n }\r\n \r\n # lets start\r\n print('Updating station list')\r\n \r\n # create output directory if not existing\r\n \r\n if not os.path.exists(dbase_dir):\r\n os.makedirs(dbase_dir)\r\n \r\n #check whether we have an up-to-date-station-list-already\r\n try:\r\n stations_network_old=[s for s in os.listdir(dbase_dir) if 'dwd_station_network' in s][0]\r\n datetime_network=datetime.date(datetime.strptime(re.findall('\\d+',stations_network_old)[0],'%Y%m%d'))\r\n #update if more than 24hours\r\n dt_today=datetime.date(datetime.now())\r\n if (dt_today-datetime_network)<timedelta(days=1):\r\n print('DWD network list is up-to-date, no update needed')\r\n filename_stations=dbase_dir+'\\\\'+stations_network_old\r\n return filename_stations\r\n else:\r\n print('DWD network list neeeds to be updated')\r\n os.remove(dbase_dir+'\\\\'+stations_network_old)\r\n except:\r\n print('DWD network list neeeds to be updated')\r\n pass\r\n \r\n \r\n # header\r\n stations_network=pd.DataFrame()\r\n \r\n # connect to ftp server and go to the folder\r\n \r\n # Connect to the Server\r\n server='opendata.dwd.de'\r\n ftp=connect_ftp(server = server,connected = False)\r\n #change to subfolder\r\n ftp.cwd('/climate_environment/CDC/observations_germany/climate/' + time_res +'/')\r\n #get dwd categories\r\n dwd_categories=ftp.nlst()\r\n #loop through the subfolders to get the station lists\r\n for category in dwd_categories:\r\n print('retrieve stationlist for', category)\r\n #try to get historical data\r\n try:\r\n dir_path='/climate_environment/CDC/observations_germany/climate/' + time_res +'/'+category+'/historical/'\r\n ftp.cwd(dir_path)\r\n except Exception as e:\r\n print(e, 'try to download category', category, 'from other folder')\r\n try:\r\n dir_path='/climate_environment/CDC/observations_germany/climate/' + time_res +'/'+category+'/'\r\n ftp.cwd(dir_path)\r\n except:\r\n print('Category', category, 'could not have been downloaded')\r\n pass\r\n #retrieve the stationlist\r\n stationlist = []\r\n # try to retrieve file\r\n retrieved=False\r\n filename=dwd_abbr[category]+'_Stundenwerte_Beschreibung_Stationen.txt'\r\n while not retrieved:\r\n try:\r\n ftp.retrlines(\"RETR \" + filename, stationlist.append)\r\n #ftp.retrbinary(\"RETR \" + filestr, stationlist.write)\r\n retrieved = True\r\n except:\r\n ftp=connect_ftp(server = server,connected = False)\r\n ftp.cwd(dir_path)\r\n #remove first two lines\r\n stationlist=stationlist[2:]\r\n #delete uncessary blanks\r\n stationlist=[re.sub(' +', ' ', station.rstrip()) for station in stationlist]\r\n #split the list\r\n stationlist=[station.split(\" \")[:7] for station in stationlist]\r\n #read as dataframe\r\n dfstations=pd.DataFrame(stationlist,columns=['STATIONS_ID','date_start','date_end','height','geo_lat','geo_lon','name'])\r\n #add true information to category\r\n dfstations[category]=True\r\n \r\n stations_network=stations_network.append(dfstations,sort=False,ignore_index=True)\r\n #A=[sub.split(\" \") for sub in stationlist] \r\n \r\n #replace all Na by False\r\n stations_network[stations_network.isna()]=0 \r\n #aggregate\r\n 
stations_network=stations_network.groupby(['STATIONS_ID'],as_index=False).agg('max')\r\n #replace zero by False in order to have pure boolean data\r\n stations_network.replace(0,False,inplace=True)\r\n #fix the error with station 14138 and 05614 and 07325, which does not have pressure cord\r\n stations_network.loc[stations_network.STATIONS_ID=='14138','pressure']=False\r\n stations_network.loc[stations_network.STATIONS_ID=='05614','pressure']=False\r\n stations_network.loc[stations_network.STATIONS_ID=='07325','pressure']=False\r\n stations_network.loc[stations_network.STATIONS_ID=='01572','pressure']=False\r\n #for temperature the same\r\n stations_network.loc[stations_network.STATIONS_ID=='14138','air_temperature']=False\r\n #save to database writing the time as well\r\n filename_stations=dbase_dir+'\\\\dwd_station_network_'+datetime.now().strftime('%Y%m%d')+'.csv'\r\n stations_network.to_csv(filename_stations,index=False)\r\n \r\n print('Updating station list...finished')\r\n \r\n return filename_stations",
"def get_nearest_station(latitude, longitude):\n url = '{}?api_key={}&filter[latitude]={}&filter[longitude]={}&sort=distance'.format(MBTA_BASE_URL,MBTA_API_KEY,latitude,longitude)\n # print(url)\n station_json = get_json(url)\n # print(station_json)\n station_name = station_json['data'][0]['attributes']['name']\n # # print(station_name)\n # # station_description = station_json['data'][0]['attributes']['description']\n # # if station_description:\n # # station_name = station_description\n # # print(station_description)\n wheelchair_boarding = station_json['data'][0]['attributes']['wheelchair_boarding']\n if wheelchair_boarding:\n wheelchair_boarding = \"This station is wheelchair accesible\"\n else:\n wheelchair_boarding = \"Sorry cripple\"\n # print(wheelchair_boarding)\n return station_name, wheelchair_boarding",
"def __getpredictors_distance(self, staname, distance):\n\n distfromsta = distance[staname]\n del distfromsta[staname] # remove the station to be fill from the dataframe\n distfromsta = distfromsta.sort_values()\n\n stations = self.network.getsta(distfromsta.index.values)\n # station = self.network.getsta(staname)\n\n # Only 3 closest stations\n # sel1 = [ (i,e) for i,e in zip(stations[0:2], stations[1:3])] # selction predictors with spacing 1\n # sel2 = [ (i,e) for i,e in zip(stations[0:2], stations[2:4])] # selction predictors with spacing 2\n\n # Use all stations\n sel1 = [(i, e) for i, e in zip(stations[0:-1], stations[1:])] # selction predictors with spacing 1\n sel2 = [(i, e) for i, e in zip(stations[0:-2], stations[2:])] # selction predictors with spacing 2\n\n # sel3 = [ (i,e) for i,e in zip(stations[0:-3], stations[3:])] # selction predictors with spacing 3\n # sel4 = [ (i,e) for i,e in zip(stations[0:-4], stations[4:])] # selction predictors with spacing 4\n\n # Only 3 closest stations\n # sel1names = [ (i.getpara('stanames'),e.getpara('stanames')) for i,e in zip(stations[0:2], stations[1:3])] # selction predictors with spacing 1\n # sel2names = [ (i.getpara('stanames'),e.getpara('stanames')) for i,e in zip(stations[0:2], stations[2:4])] # selction predictors with spacing 1\n\n # using all stations\n sel1names = [(i.getpara('stanames'), e.getpara('stanames')) for i, e in\n zip(stations[0:-1], stations[1:])] # selction predictors with spacing 1\n sel2names = [(i.getpara('stanames'), e.getpara('stanames')) for i, e in\n zip(stations[0:-2], stations[2:])] # selction predictors with spacing 1\n\n # sel3names = [ (i.getpara('stanames'),e.getpara('stanames')) for i,e in zip(stations[0:-3], stations[3:])] # selction predictors with spacing 1\n # sel4names = [ (i.getpara('stanames'),e.getpara('stanames')) for i,e in zip(stations[0:-4], stations[4:])] # selction predictors with spacing 1\n\n selection = [x for x in itertools.chain.from_iterable(itertools.izip_longest(sel1, sel2)) if x]\n selectionnames = [x for x in itertools.chain.from_iterable(itertools.izip_longest(sel1names, sel2names)) if x]\n\n return selection, selectionnames",
"def analyseCoordination(self):\n #create a list of criteria that correspond to maximal path length\n #max_path_length = max(self.pathLengths)\n\n #criterion_max_path_length = []\n #origins_max_path_length = []\n #for c in range(len(self.pathLengths)):\n # if self.pathLengths[c] == max_path_length:\n # criterion_max_path_length.append(self.globalMin[c])\n # origins_max_path_length.append(self.origins[c])\n\n #min_criterion = min(criterion_max_path_length)\n\n #find index\n #for m in range(len(criterion_max_path_length)):\n # if criterion_max_path_length[m] == min_criterion:\n # break\n\n #for s in range(len(self.origins)):\n # if self.origins[s] == origins_max_path_length[m]:\n # break\n\n min_criterion = self.globalMin[0]\n self.overall_min = min_criterion\n self.overall_max_path_length = len(self.min_path[0])\n\n if self.chosenScheduleIndex != self.globalMinSchedIdx[0]:\n self.chosenScheduleIndex = self.globalMinSchedIdx[0]\n self.chosenSchedule = self.schedules[self.chosenScheduleIndex]\n self.EConsumptionChosenSchedule = self.EConsumptionScheduleCurves[self.chosenScheduleIndex]\n # update SOC\n self.setSOC(self.SOCEnd[self.chosenScheduleIndex])\n # update modulation level\n self.setStateModlvl(self.chosenSchedule[-1])\n\n\n # inform all neighbors about origin that has local minimal criterion\n for n in range(len(self.Neighbors)):\n #structure: ['minimalorigin', ID_minimal_origin, minimal_criterion_value]\n #self.sendMessage(self.Neighbors[n], 70, ['minimalorigin', copy.deepcopy(origins_max_path_length[m]), copy.deepcopy(min_criterion), copy.deepcopy(self.min_path[s]), copy.deepcopy(self.min_path_schedules[s])])\n self.sendMessage(self.Neighbors[n], 70, ['minimalorigin', copy.deepcopy(self.CommID), copy.deepcopy(min_criterion), copy.deepcopy(self.min_path[0]), copy.deepcopy(self.min_path_schedules[0])])\n\n if self.OPTcriterion == 'maxmindiff':\n fluct_criterion = max(self.EFluctuationCurve) - min(self.EFluctuationCurve)\n elif self.OPTcriterion == 'absremainder':\n fluct_criterion = 0\n for a in range(len(self.EFluctuationCurve)):\n fluct_criterion += abs(self.EFluctuationCurve[a])\n\n\n #print 'ID {0}: criterion is: {1} , of origin {4}, path length: {2}, schedules: {5}, with improvement of {3} %'.format(self.CommID, min_criterion, len(self.min_path[s]), 100 - 100*(float((float(min_criterion))/float(fluct_max_min_diff))), origins_max_path_length[m], self.min_path_schedules[s] )\n self.log_message('ID {0}: criterion is: {1} , of origin {4}, path length: {2}, schedules: {5}, with improvement of {3} %'.format(self.CommID, min_criterion, len(self.min_path[0]), 100 - 100*(float((float(min_criterion))/float(fluct_criterion))), self.CommID, self.min_path_schedules[0] ))",
"def spatial_check(obs, nstnnets, lat, lon, elev, var, ivar, qc_flag):\n\n ndts = len(obs[:, 0, 0]) # number of hours in obs.\n roism = 100.0 # smaller radius of influence.\n roibg = 150.0 # bigger radius of influence.\n min_stations = 2 # min # of stns needed for testing.\n level1 = suspflag\n level2 = warnflag\n latdiff = 3.0\n londiff = 3.0\n\n thresholds = {\n 'pressure': (750.0, 1000.0, 1000.0),\n 'temp': (5.556, 8.333, 150.0), # (10degF), (15degF)\n 'dew': (5.556, 8.333, 150.0), # (10degF), (15degF)\n 'wind_speed': (7.65, 10.2, 250.0), # (15kts), (20kts)\n 'wind_dir': (360.0, 360.0, 250.0),\n 'rel_hum': (75.0, 85.0, 250.0),\n\n 'pcp6': (76.2, 101.6, 500.0), # (mm; eq 3 inches), (mm; eq 4 inches)\n 'pcp24': (152.4, 203.2, 500.0), # (mm; eq 6 inches), (mm; eq 8 inches).\n }\n\n try:\n maxvdiff1, maxvdiff2, max_elev_diff = thresholds[var]\n except KeyError:\n raise ValueError('Unrecognized variable')\n\n # If variable is precip, look for traces make them 0.0 (not permanently as these data values don't get sent back out)\n if var == 'pcp':\n for d in range(ndts):\n for s in range(nstnnets):\n if obs[d, s, ivar] == trace:\n obs[d, s, ivar] = 0.0 # obs[:,:,ivar]...\n\n # Cliff's simple similar neighbor test.\n\n for d in range(ndts):\n for s in range(nstnnets):\n if obs[d, s, ivar] == mvc or elev[d, s] == mvc or qc_flag[d, s, ivar, irangeflag] == failflag:\n qc_flag[d, s, ivar, ispatialflag] = notestflag\n continue\n\n valsm2 = []\n valbg2 = []\n\n # for each station, check it versus every other station (except itself). First time through get # of\n # stations within radius of influence to determine if we can do this test.\n for ss in range(nstnnets):\n if ss == s or obs[d, ss, ivar] == mvc \\\n or elev[d, ss] == mvc or lat[d, ss] == mvc or lon[d, ss] == mvc \\\n or abs(lat[d, ss] - lat[d, s]) > latdiff or abs(lon[d, ss] - lon[d, s]) > londiff \\\n or abs(elev[d, ss] - elev[d, s]) > max_elev_diff:\n continue\n if qc_flag[d, ss, ivar, irangeflag] == failflag \\\n or qc_flag[d, ss, ivar, istepflag] in [suspflag, warnflag] \\\n or qc_flag[d, ss, ivar, ipersistflag] in [suspflag, warnflag]:\n continue\n\n dist = distance(lat[d, s], lon[d, s], lat[d, ss], lon[d, ss])\n obsdiff = abs(obs[d, ss, ivar] - obs[d, s, ivar])\n\n if dist < roism:\n valsm2.append(obsdiff)\n\n elif dist < roibg:\n valbg2.append(obsdiff)\n\n # !--- If any obs found in roi was <= maxvdiff1, it's a pass. If none found <= maxvdiff1,\n # but one is >= maxvdiff1 & < maxvdiff2, it's \"suspect.\" Otherwise it's \"warning.\" Look in big roi too.\n if len(valsm2) >= min_stations:\n mindiffsm = min(valsm2)\n if mindiffsm <= maxvdiff1:\n qc_flag[d, s, ivar, ispatialflag] = passflag\n elif maxvdiff1 < mindiffsm <= maxvdiff2:\n qc_flag[d, s, ivar, ispatialflag] = level1\n else:\n qc_flag[d, s, ivar, ispatialflag] = level2\n elif len(valsm2) < min_stations <= len(valbg2):\n qc_flag[d, s, ivar, ispatialflag] = passflag if min(valbg2) <= maxvdiff2 else level1\n\n else: # not enough obs in either roi to do test.\n qc_flag[d, s, ivar, ispatialflag] = notestflag",
"def SK(all_black,all_white,all_other):\n real_zone_1=[]\n real_zone_2=[]\n real_zone_3=[]\n global p\n #FIRST defining the zone value since the more center you are, the\n #more value you will have.\n \n #Zone 1: the gratest value zone\n zone_1=[]\n zone_1_val=0.3\n for i in all_other:\n if 125<=int(i[0])<=1100 and 125<=int(i[1])<=825:\n zone_1.append(i)\n\n #zone 2: second greatest value zone\n zone_2=[]\n zone_2_val=0.2\n for i in all_other:\n if 0<=int(i[0])<=125 and 125<=int(i[1])<=825:\n zone_2.append(i)\n if 1100<=int(i[0])<=1225 and 125<=int(i[1])<=825:\n zone_2.append(i)\n if 125<=int(i[0])<=1100 and 0<=int(i[1])<=125:\n zone_2.append(i)\n if 125<=int(i[0])<=1100 and 825<=int(i[1])<=950:\n zone_2.append(i)\n\n #zone 3: smallest value zone\n zone_3=[]\n zone_3_val=0.1\n for i in all_other:\n if 0<=int(i[0])<=125 and 0<=int(i[1])<=125:\n zone_3.append(i)\n if 0<=int(i[0])<=125 and 825<=int(i[1])<=950:\n zone_3.append(i)\n if 1100<=int(i[0])<=1225 and 0<=int(i[1])<=125:\n zone_3.append(i)\n if 1100<=int(i[0])<=1225 and 825<=int(i[1])<=950:\n zone_3.append(i)\n\n if all_black==[] and all_white==[]:\n p=0 #First hand Black\n #all_black.append([25*25,19*25])\n return[25*25,19*25]\n\n\n \n\n #Calculation of the values\n val=0\n value_list=[] #[[coordinate],val]\n if p == 0: #First hand Black\n for i in all_black:\n x=i[0]\n y=i[1]\n #right down↘️\n if [x+25 ,y+25] in all_other:\n val=1\n value_list.append([[x+25,y+25],val])\n #print('右下 if',value_list)\n #print('Right D if',val)\n else:\n val=1\n for a in range(1,4):\n if [x+25*a,y+25*a] in all_black:\n val+=1\n elif [x+25*a,y+25*a] in all_other:\n value_list.append([[x+25*a,y+25*a],val])\n #print('Right D',val)\n #print('右下',value_list)\n elif [x+25*a,y+25*a] in all_white:\n break\n \n #left up↖️\n if [x-25,y-25] in all_other:\n val=1\n value_list.append([[x-25,y-25],val])\n #print('Left U if')\n else:\n val=1\n for a in range(1,4):\n if [x-25*a,y-25*a] in all_black:\n val+=1\n elif [x-25*a,y-25*a] in all_other:\n value_list.append([[x-25*a,y-25*a],val])\n #print('Left U')\n elif [x-25*a,y-25*a] in all_white:\n break\n \n #right up↗️ \n if [x+25,y-25] in all_other:\n val=1\n value_list.append([[x+25,y-25],val])\n #print('RU if')\n else:\n val=1\n for a in range(1,4):\n if [x+25*a,y-25*a] in all_black:\n val+=1\n elif [x+25*a,y-25*a] in all_other:\n value_list.append([[x+25*a,y-25*a],val])\n #print('右上')\n elif [x+25*a,y-25*a] in all_white:\n break\n\n #left down↙️\n if [x-25,y+25] in all_other:\n val=1\n value_list.append([[x-25,y+25],val])\n #print('左下 if') \n else:\n val=1\n for a in range(1,4):\n if [x-25*a,y+25*a] in all_black:\n val+=1\n elif [x-25*a,y+25*a] in all_other:\n value_list.append([[x-25*a,y+25*a],val])\n #print('左下')\n elif [x-25*a,y+25*a] in all_white:\n break\n\n #right➡️\n if [x+25,y] in all_other:\n val=1\n value_list.append([[x+25,y],val])\n #print('右',value_list)\n #print('右 if')\n else:\n val=1\n for a in range(1,4):\n if [x+25*a,y] in all_black:\n val+=1\n elif [x+25*a,y] in all_other:\n value_list.append([[x+25*a,y],val])\n #print('右')\n elif [x+25*a,y] in all_white:\n break\n\n #left⬅️ \n if [i[0]-25,i[1]] in all_other:\n val=1\n value_list.append([[i[0]-25,i[1]],val])\n #print('左', value_list)\n #print('左 if')\n else:\n val=1\n for a in range(1,4):\n if [i[0]-25*a,i[1]] in all_black:\n val+=1\n elif [i[0]-25*a,i[1]] in all_other:\n value_list.append([[i[0]-25*a,i[1]],val])\n #print('左')\n elif [i[0]-25*a,i[1]] in all_white:\n break\n\n #down⬇️ \n if [i[0],i[1]+25] in all_other:\n val=1\n 
value_list.append([[i[0],i[1]+25],val])\n #print('下', value_list)\n #print('下 if')\n else:\n val=1\n for a in range(1,4):\n if [i[0],i[1]+25*a] in all_black:\n val+=1\n elif [i[0],i[1]+25*a] in all_other:\n value_list.append([[i[0],i[1]+25*a],val])\n #print('下')\n elif [i[0],i[1]+25*a] in all_white:\n break\n \n #up⬆️\n if [i[0],i[1]-25] in all_other:\n val=1\n value_list.append([[i[0],i[1]-25],val])\n #print('上',value_list)\n #print('上 if')\n else:\n val=1\n for a in range(1,4):\n if [i[0],i[1]-25*a] in all_black:\n val+=1\n elif [i[0],i[1]-25*a] in all_other:\n value_list.append([[i[0],i[1]-25*a],val])\n #print('上')\n elif [i[0],i[1]-25*a] in all_white:\n break\n\n\n\n all_val=[]\n #print(value_list,'这是value_list')\n\n \n sum_value=[]\n coord=[]\n for a in value_list:\n if a[0] not in coord:\n coord.append(a[0])\n #print(coord)\n for b in coord:\n he=[]\n for c in value_list:\n if b == c[0]:\n he.append(c[1])\n #print(he,'这是和')\n sum_value.append([b,sum(he)])\n\n\n\n #print(sum_value,'同样坐标下val相加')\n for i in sum_value:\n all_val.append(i[1])\n #print(all_val,'所有的相加之后的val')\n numb=-1\n all_max=[]\n for v in all_val:\n numb+=1\n if v == max(all_val):\n max_val_list = value_list[numb][0] #max (x,y)\n if value_list[numb][0] in all_other:\n all_max.append(value_list[numb])\n \n \n #print(max(all_val),'max val')\n for u in all_max:\n if u[0] in zone_1:\n real_zone_1.append(u[0])\n if u[0] in zone_2:\n real_zone_2.append(u[0])\n if u[0] in zone_3:\n real_zone_3.append(u[0])\n if real_zone_1 != []:\n print('real_1')\n return real_zone_1[0]\n elif real_zone_2 != []:\n print('Its zone 2')\n return real_zone_2[0]\n elif real_zone_3 != []:\n print('Its zone 3')\n return real_zone_3[0]\n else:\n return \"mistake\""
] |
[
"0.6480234",
"0.6318352",
"0.6269479",
"0.6215623",
"0.6206157",
"0.6110717",
"0.60025865",
"0.5942108",
"0.5934246",
"0.5904729",
"0.5811288",
"0.5711787",
"0.57106215",
"0.56911725",
"0.5684194",
"0.5649491",
"0.5626808",
"0.56212485",
"0.55674076",
"0.5553363",
"0.5552577",
"0.5550375",
"0.5518935",
"0.5510778",
"0.54994935",
"0.5495334",
"0.5485145",
"0.54698217",
"0.54626256",
"0.54444784"
] |
0.6406745
|
1
|
Get predictors based on their distance. The predictors are selected as follows: [1,2], [1,3], [1,4], [2,3], [2,4], [2,5], [2,6]
|
def __getpredictors_distance(self, staname, distance):
distfromsta = distance[staname]
del distfromsta[staname] # remove the station to be fill from the dataframe
distfromsta = distfromsta.sort_values()
stations = self.network.getsta(distfromsta.index.values)
# station = self.network.getsta(staname)
# Only 3 closest stations
# sel1 = [ (i,e) for i,e in zip(stations[0:2], stations[1:3])] # selection predictors with spacing 1
# sel2 = [ (i,e) for i,e in zip(stations[0:2], stations[2:4])] # selection predictors with spacing 2
# Use all stations
sel1 = [(i, e) for i, e in zip(stations[0:-1], stations[1:])] # selection predictors with spacing 1
sel2 = [(i, e) for i, e in zip(stations[0:-2], stations[2:])] # selection predictors with spacing 2
# sel3 = [ (i,e) for i,e in zip(stations[0:-3], stations[3:])] # selection predictors with spacing 3
# sel4 = [ (i,e) for i,e in zip(stations[0:-4], stations[4:])] # selection predictors with spacing 4
# Only 3 closest stations
# sel1names = [ (i.getpara('stanames'),e.getpara('stanames')) for i,e in zip(stations[0:2], stations[1:3])] # selection predictors with spacing 1
# sel2names = [ (i.getpara('stanames'),e.getpara('stanames')) for i,e in zip(stations[0:2], stations[2:4])] # selection predictors with spacing 2
# using all stations
sel1names = [(i.getpara('stanames'), e.getpara('stanames')) for i, e in
zip(stations[0:-1], stations[1:])] # selection predictors with spacing 1
sel2names = [(i.getpara('stanames'), e.getpara('stanames')) for i, e in
zip(stations[0:-2], stations[2:])] # selection predictors with spacing 2
# sel3names = [ (i.getpara('stanames'),e.getpara('stanames')) for i,e in zip(stations[0:-3], stations[3:])] # selection predictors with spacing 3
# sel4names = [ (i.getpara('stanames'),e.getpara('stanames')) for i,e in zip(stations[0:-4], stations[4:])] # selection predictors with spacing 4
selection = [x for x in itertools.chain.from_iterable(itertools.izip_longest(sel1, sel2)) if x]
selectionnames = [x for x in itertools.chain.from_iterable(itertools.izip_longest(sel1names, sel2names)) if x]
return selection, selectionnames
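A minimal standalone sketch of the pair-selection step above, assuming Python 3 (where itertools.izip_longest is named itertools.zip_longest); the function name and the station names in the usage example are invented for illustration, not taken from the original module.

import itertools

def interleave_predictor_pairs(stations_sorted_by_distance):
    """Build spacing-1 and spacing-2 station pairs and interleave them."""
    s = stations_sorted_by_distance
    sel1 = list(zip(s[:-1], s[1:]))   # pairs with spacing 1: (s0,s1), (s1,s2), ...
    sel2 = list(zip(s[:-2], s[2:]))   # pairs with spacing 2: (s0,s2), (s1,s3), ...
    # interleave the two lists, dropping the padding values zip_longest adds
    return [p for p in itertools.chain.from_iterable(
        itertools.zip_longest(sel1, sel2)) if p is not None]

if __name__ == "__main__":
    print(interleave_predictor_pairs(["sta1", "sta2", "sta3", "sta4"]))
    # [('sta1', 'sta2'), ('sta1', 'sta3'), ('sta2', 'sta3'), ('sta2', 'sta4'), ('sta3', 'sta4')]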
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def getpredictors_distance( staname, distance):\n\n distfromsta = distance[staname]\n try:\n del distfromsta[staname] # remove the station to be fill from the dataframe\n except:\n pass\n distfromsta = distfromsta.sort_values()\n\n stations = distfromsta.index\n\n sel1 = [(i, e) for i, e in zip(stations[0:-1], stations[1:])] # selction predictors with spacing 1\n sel2 = [(i, e) for i, e in zip(stations[0:-2], stations[2:])] # selction predictors with spacing 2\n\n selection= [None] * (len(sel1) + len(sel2))\n selection[::2] = sel1\n selection[1::2] = sel2\n\n return selection[:4]",
"def predict(x):\n file_train = open('trains.pkl', \"rb\")\n train = pkl.load(file_train)\n y = []\n k = 5\n x_train = train[0]\n y_train = train[1]\n for q in range(100):\n distance = []\n for i in range(800):\n distance.append(np.linalg.norm(x[q] - x_train[i]))\n\n # distance.append(np.sqrt(sum((x[q] - x_train[i]) ** 2)))\n # u = (x[0] - x_train) ** 2\n # print(distance)\n # distance = np.sqrt([sum(b) for b in u])\n # print(distance)\n minarg = np.argsort(distance)\n i = np.array(np.zeros(10))\n j = 0\n while k not in i:\n i[y_train[minarg[j]]] += 1\n j += 1\n y.append(np.argmax(i))\n return y",
"def pred_for_user(self,u):\r\n ids=np.where(self.Y_data_n[:,0]==u)[0]\r\n items_rated_by_u=Y_data_n[ids,1].tolist()\r\n pred_ratings=[]\r\n for i in range(self.n_items):\r\n if i not in items_rated_by_u:\r\n pred_ratings.append(self.pred(u,i))\r\n return pred_ratings",
"def predict(self, X):\n\n Xn = np.copy(X)\n\n preds = []\n # compute distance from all points\n for x1 in Xn:\n dist = self._euclidian_distance(self.X_data, x1)\n dist = np.vstack((dist, self.y)).T\n dist = dist[dist[:, 0].argsort(axis=0)][:,-1]\n # get a vote from top k\n pred = sts.mode(dist[0:self.k])[0][0]\n preds.append(pred)\n\n return np.array(preds)",
"def oldPredict(self, data):\n\n predictions = []\n\n if len(self.observations) < self.k_neighbors:\n print(f\"Data length ({len(data)}) was too small.\")\n\n for row in data:\n neighbors_info = {}\n\n for row_index in range(len(self.observations)):\n distance = self.calcualteEuclideanDistance(self.observations[row_index], row)\n if len(neighbors_info) > self.k_neighbors - 1:\n largest_distance = max(neighbors_info.keys())\n if distance < largest_distance:\n neighbors_info[distance] = self.labels[row_index]\n del neighbors_info[largest_distance]\n else:\n neighbors_info[distance] = self.labels[row_index]\n\n unique_values = set(neighbors_info.values())\n if len(unique_values) == 1:\n value = unique_values.pop()\n predictions.append(value)\n else:\n best_value = 0\n best_value_weight = 0\n for label in unique_values:\n weight = 0\n for distance in neighbors_info.keys():\n if label == neighbors_info[distance]:\n if 'inverse_distance' == self.weight_type:\n weight += self.calulateWeightedVote(distance)\n elif 'no_weight' == self.weight_type:\n weight += 1\n else:\n print(\"Not a valid_weight_type.\")\n\n if weight > best_value_weight:\n best_value_weight = weight\n best_value = label\n\n predictions.append(best_value)\n # print(f\"Neighbors Info: {neighbors_info}\")\n\n return predictions",
"def predict(self, X):\n labels = []\n for i in range(0,len(X)):\n min_distance = distance.euclidean(X[i],self.best_medoids[0])\n min_distance_index = 0\n\n for j in range(1,len(self.best_medoids)):\n current_distance = distance.euclidean(X[i],self.best_medoids[j])\n if(current_distance < min_distance):\n min_distance = current_distance\n min_distance_index = j\n\n labels.append(min_distance_index)\n return labels\n\n pass",
"def predict(self, predPoints=None):",
"def predict_individual(self,Xtest,nn_list):\n\n #calculate distances first\n self.dist_calc(Xtest)\n\n ypred = []\n\n\n for nn in nn_list:\n\n neigh_ind = self.ind[:,nn-1] #subtract 1 since it is zero based\n\n ypred.append(self.ytrain[neigh_ind])\n\n self.ypred = ypred\n\n return ypred",
"def predict_individual(self,Xtest,nn_list):\n\n #calculate distances first\n self.dist_calc(Xtest)\n\n ypred = []\n\n\n for nn in nn_list:\n\n neigh_ind = self.ind[:,nn-1]# subtract 1 since it is zero based\n\n ypred.append(self.ytrain[neigh_ind])\n\n self.ypred = ypred\n\n return ypred",
"def get_prediction(data):\n # load cannabis data\n strains = pd.read_csv(URL)\n # Combine the Effects and Flavors in one column\n strains['Criteria'] = strains['Effects'] + ',' + strains['Flavor']\n\n # Train model on dtm\n nn = NearestNeighbors(n_neighbors=5, algorithm='ball_tree')\n nn.fit(dtm)\n\n # load request data\n # r = data.args\n entry = [v for k,v in data.items()][1:]\n #print(entry)\n # transform\n new = tf.transform(entry)\n #print(new)\n results = nn.kneighbors(new.todense())\n #print(results)\n # extract top 5 results\n output = [strains['Strain'][results[1][0][i]] for i in range(5)]\n\n return output",
"def predict(self, query: np.ndarray):\n assert query.shape == self._training_set[1, :-1].shape, \"Size of the query does not match the size of the\" \\\n \" training set, Which is: \"\\\n + str(self._training_set[1, :-1].shape)\n tmp = (self._training_set[:, :-1] - query).astype(float)\n distances = np.linalg.norm(tmp, axis=1)\n\n index = np.argsort(distances)\n sorted_set = self._training_set[index, :]\n\n (unique, counts) = np.unique(sorted_set[:self._k, -1], return_counts=True)\n\n return unique[counts == np.max(counts)][0]",
"def predict_only(self):",
"def preprocess(df):\n df[\"distance\"] = compute_distance(df)\n X_train = df[[\"distance\"]]\n y_train = df[\"fare_amount\"]\n return X_train, y_train",
"def get_predictors(self):\n\t\treturn self.predictors",
"def predict(self,data):\n results = []\n predict_instances = np.shape(data)[0]\n stored_instances = np.shape(self.data)[0]\n for predict_index in range(predict_instances):\n neighbors = [] # dist, label\n for stored_index in range(stored_instances):\n neighbors.append((self._distance(self.data[stored_index], data[predict_index]), self.data_labels[stored_index][0], data[predict_index]))\n neighbors = sorted(neighbors, key=lambda x: x[0])[:self.k]\n results.append(self._analyze_neighbors(neighbors))",
"def predict_labels(self, dists, k=1):\n num_test = dists.shape[0]\n y_pred = np.zeros(num_test)\n for i in range(num_test):\n indices = np.argsort(dists[i])[:k]\n closest_y = self.y_train[indices]\n y_pred_i = mode(closest_y)[0]\n y_pred[i] = y_pred_i\n return y_pred",
"def post_predictive_distribution(self, samples):\n post_pred_dist = []\n posteriors = self.posterior(samples)\n for point in range(1, self.max_val+1):\n post_pred = 0\n for concept, posterior in list(zip(self.concepts, posteriors)):\n if point in concept.extension:\n post_pred += posterior\n post_pred_dist.append(post_pred)\n return post_pred_dist",
"def predict(self, test):\n test_data = np.asarray(test)\n assert self.x is not None and self.y is not None, \"You must train the classifier before testing\"\n results = []\n for i in range(test_data.shape[0]):\n m = self.x - test_data[i]\n # dist holds the Euclidean distance to every training point\n dist = np.sum(m*m, 1)\n # this call uses a quickselect algo to find k-smallest\n ind = np.argpartition(dist, self.k)[:self.k]\n # take the class present the most among the k closest\n out = int(scipy.stats.mode(self.y[ind], axis=None)[0])\n results.append(out)\n return results",
"def _calc_distances(preds, targets, mask, normalize):\n N, K, _ = preds.shape\n _mask = mask.copy()\n _mask[np.where((normalize == 0).sum(1))[0], :] = False\n distances = np.full((N, K), -1, dtype=np.float32)\n normalize[np.where(normalize <= 0)] = 1000000.0\n distances[_mask] = np.linalg.norm(((preds - targets) / normalize[:, None, :])[_mask], axis=-1)\n return distances.T",
"def estimate_dists(self) -> np.array:\n return np.array(\n list(\n chain.from_iterable(\n model.estimate_dist(self.featurized_data)\n for model in self.models\n )\n )\n )",
"def predict(self,Xtest,nn_list):\n\n self.dist_calc(Xtest)\n xsize = self.dist.shape[0]\n ysize = self.ytrain.shape[1]\n ypred = []\n\n for nn in nn_list:\n\n yp = np.empty((xsize,ysize))\n\n if self.weights =='uniform':\n\n neigh_ind = self.ind[:,0:nn]\n\n for j in range(self.ytrain.shape[1]):\n\n mode = utilities.quick_mode_axis1_keep_nearest_neigh(\n self.ytrain[neigh_ind,j].astype(int))\n yp[:,j] = mode\n\n\n elif self.weights=='distance':\n dist = self.dist[:,0:nn]\n neigh_ind = self.ind[:,0:nn]\n W = 1./(dist+.000001) #to make sure we dont divide by zero\n\n for j in range(self.ytrain.shape[1]):\n mode, _ = utilities.weighted_mode(self.ytrain[neigh_ind,j].astype(int), W, axis=1)\n\n mode = np.asarray(mode.ravel(), dtype=int)\n\n yp[:, j] = mode\n\n ypred.append(yp)\n\n self.ypred = ypred\n\n return ypred",
"def predict(self, dists, k=1):\n s = np.argsort(dists, axis=1)\n y_pred = np.zeros(dists.shape[0])\n for i in range(dists.shape[0]):\n y_pred[i] = np.argmax(np.bincount(self.ytr[s[i,:k]]))\n return y_pred",
"def predict(self,X,y):\n self.X_test = X\n self.y_test = y\n d = []\n for i in range(self.X_train.shape[0]):\n d.append(self.get_distance(self.X_train.ix[i,:])) # hold all distances\n sorted = np.argsort(d)\n k_indices = np.argsort(d)[:self.k] # get indices with lowest distances\n predictions = self.y_train[k_indices]\n unique, counts = np.unique(predictions,return_counts=True)\n\n if (np.where(predictions ==1)[0].shape[0]) >self.p*self.k:\n y_pred = 1\n else:\n y_pred=0\n # {'sample':X_test.name,'d':d,'k_ix':k_indices,'pred':predictions,\n # 'counts':counts,'uniq':unique,'y_pred':y_pred,\n # 'y_test':self.y_test,'y_train':self.y_train,\n # 'sorted':sorted}\n return {'sample':self.X_test.name,\n 'y_pred':y_pred, \n 'y_test':self.y_test}",
"def nearest_neighbors_classifier(data):\n clf = KNeighborsClassifier(3, 'distance')\n clf.name = \"KNN\"\n train_predict_and_results(data, clf)",
"def predict(self,Xtest,nn_list):\n\n #calculate distances first\n self.dist_calc(Xtest)\n\n ypred = []\n\n for nn in nn_list:\n\n neigh_ind = self.ind[:,0:nn]\n\n if self.weights == 'uniform':\n\n p = np.mean(self.ytrain[neigh_ind], axis=1)\n\n elif self.weights =='distance':\n\n p = np.empty((self.dist.shape[0], self.ytrain.shape[1]), dtype=np.float)\n\n for i in range(self.ytrain.shape[1]):\n p[:,i] = utilities.weighted_mean(self.ytrain[neigh_ind,i], self.dist[:,0:nn])\n\n ypred.append(p)\n\n self.ypred = ypred\n self.nn_list = nn_list\n return ypred",
"def LevDistMultilabels(y_true, y_pred):\n \n n = y_pred.shape[0]\n D = 0\n for i in range(n):\n D += LevenshteinDistance(y_pred[i,:], y_true[i,:])[-1, -1]\n return D/n",
"def getDistances(trainingSet, testInstance, distances):\n # Empty list to store distances of between testInstance and each trainSet item\n # Number of dimensions to check\n length=len(testInstance) - 1\n # Iterate through all items in trainingSet and compute the distance, then append to the distances list\n for x in range(len(trainingSet)):\n dist=calculateDistance(testInstance, trainingSet[x], length)\n distances.append((trainingSet[x], dist))\n return distances",
"def _predict_base(self, X):\n\n # Return the indices of the BMU which matches the input data most\n distances = []\n\n prev_activation = np.zeros((self.map_dim, self.data_dim))\n\n for x in X:\n distance, prev_activation = self._get_bmus(x, prev_activation=prev_activation)\n distances.append(distance)\n\n return distances",
"def _get_closest(centers, features):\n pred_labels = []\n\n features = features\n for feature in features:\n distances = End2End._dist(centers, feature)\n pred_labels.append(distances.argmin().item())\n\n return np.array(pred_labels)",
"def predict(self, data):\n\t\treturn closestCluster(data, self.centers)"
] |
[
"0.65554714",
"0.59923244",
"0.5906513",
"0.5849182",
"0.5799373",
"0.5795326",
"0.5768941",
"0.5718649",
"0.57175404",
"0.5713419",
"0.5701458",
"0.5656988",
"0.5656353",
"0.56391543",
"0.562515",
"0.56004006",
"0.553719",
"0.5531781",
"0.55137074",
"0.5489938",
"0.5472367",
"0.5462772",
"0.54522556",
"0.54234236",
"0.5396595",
"0.5379135",
"0.5375029",
"0.5300419",
"0.5286575",
"0.52725637"
] |
0.63834083
|
1
|
Return the selections sorted by their correlation (rsquared) scores
|
def __sort_predictors_by_corr(self, station, selections, var, From, To, by, how, constant=True,
selectionsnames=None, sort_cor=True, cor_lim=None):
scores_corel = pd.DataFrame(index=np.arange(0, len(selections)), columns=['corel', 'selections', 'params',
'selectionname'])  # correlation of each selection and variable
for i, (selection, selectionname) in enumerate(zip(selections, selectionsnames)):
try:
Y = station.getData(var, From=From, To=To, by=by, how=how) # variable to be filled
X1 = selection[0].getData(var, From=From, To=To, by=by, how=how) # stations variable used to fill
X2 = selection[1].getData(var, From=From, To=To, by=by, how=how) # stations variable used to fill
data = pd.concat([Y, X1, X2], keys=['Y', 'X1', 'X2'], axis=1, join='outer').dropna()
est = self.__MLR(data[['X1', 'X2']], data['Y'], constant=constant)
rsquared = est.rsquared
scores_corel.loc[i, 'corel'] = rsquared
scores_corel.loc[i, 'selections'] = selection
scores_corel.loc[i, 'selectionname'] = selectionname
if constant:
scores_corel.loc[i, 'params'] = [est.params[0], est.params[1], est.params[2]]
else:
scores_corel.loc[i, 'params'] = [est.params[0], est.params[1]]
except ValueError:
print('No data to do the multilinear regression. Put correlation = 0')
scores_corel.loc[i, 'selections'] = selection
scores_corel.loc[i, 'selectionname'] = selectionname
scores_corel.loc[i, 'corel'] = 0
scores_corel.loc[i, 'params'] = np.nan
if sort_cor:
scores_corel = scores_corel.sort_values('corel', ascending=False)
if cor_lim:
scores_corel = scores_corel[scores_corel['corel'] > cor_lim]
else:
scores_corel = scores_corel[scores_corel['corel'] > 0]
scores_corel.index = np.arange(0, len(scores_corel.index))
selections = scores_corel['selections'].values
params = scores_corel['params'].values
print("u" * 30)
print("Correlation coefficient of the multilinear regression")
print("u" * 30)
print(scores_corel[['corel', 'selectionname']])
print("u" * 30)
return selections, params
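A self-contained sketch of the ranking idea (not the class's own __MLR helper, which is not shown here): fit Y on each candidate pair (X1, X2), compute the R-squared, and sort the pairs best-first. It uses plain numpy least squares, and the toy data in the usage example is invented for illustration.

import numpy as np

def rank_pairs_by_rsquared(y, candidate_pairs):
    """candidate_pairs: list of (x1, x2) arrays aligned with y."""
    scored = []
    for x1, x2 in candidate_pairs:
        X = np.column_stack([np.ones_like(y), x1, x2])  # constant + two predictors
        beta, *_ = np.linalg.lstsq(X, y, rcond=None)
        resid = y - X @ beta
        ss_res = np.sum(resid ** 2)
        ss_tot = np.sum((y - y.mean()) ** 2)
        rsq = 1.0 - ss_res / ss_tot
        scored.append((rsq, beta))
    # highest R-squared first, mirroring sort_values('corel', ascending=False)
    return sorted(scored, key=lambda t: t[0], reverse=True)

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    x1, x2 = rng.normal(size=50), rng.normal(size=50)
    y = 2.0 * x1 + 0.5 * x2 + rng.normal(scale=0.1, size=50)
    best_rsq, best_params = rank_pairs_by_rsquared(y, [(x1, x2), (x2, rng.normal(size=50))])[0]
    print(round(best_rsq, 3))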
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def corr_list(self):\n c = self.df.corr().abs()\n s = c.unstack()\n so = s.sort_values(ascending=False)\n i = int(len(so) ** (1/2))\n charts = so[i:]\n charts = charts[::2]\n if len(charts) > 3:\n charts = charts[:3]\n return charts.index, charts.values",
"def compute_correlations(struc_df, option, gamma, alpha):\n n_states = len(np.unique(struc_df.objnum))\n nodes = network.temp_node_info()\n adjacency = network.adjacency_mat(nodes)\n L = compute_limit_matrix(0.5, adjacency, n_states)\n L_vector = L.flatten()\n M = learn_sr(struc_df, gamma, alpha)\n M = M[2, 6]\n M_vector = M.flatten()\n\n if option == \"norm\":\n print(\"Norm of L - M: \")\n print(la.norm(L_vector - M_vector, np.inf))\n\n if option == \"correlation\":\n print(\"Correlation of L, M: \")\n print(np.dot(L_vector, M_vector) /\n (la.norm(L_vector) * la.norm(M_vector)))",
"def get_top_correlations(dataframe,columns,frame_type='spark'):\n if frame_type == 'spark':\n import math\n correlation_list = []\n correlations_finished = [] #hold correlatons done to prevent repitition\n for i, col_i in enumerate(columns):\n for j, col_j in enumerate(columns):\n if col_i+col_j not in correlations_finished: # don't repeat\n columns = [col_i,col_j]\n correlation = dataframe.stat.corr(col_i,col_j)\n if math.isnan(correlation):\n correlation=0.0\n correlation_list.append({\n 'columns': columns,\n 'correlation': correlation,\n 'correlation_abs':math.fabs(correlation),\n })\n # print({\n # 'columns': columns,\n # 'correlation': correlation,\n # 'correlation_abs':math.fabs(correlation),\n # })\n correlations_finished.append(col_i+col_j)\n #sort the list so highest correlations are first\n correlation_list = sorted(correlation_list, key=lambda x: x['correlation_abs'], reverse=True)\n return correlation_list\n else:\n pass",
"def build_retrieved_list(self, scores):\n\n res = self.index.rank(scores)\n tmp_res = []\n # keep scores too\n tmp_scores = []\n\n # build the list\n tmp_res = []\n #print rank, \"<--\"\n for i, k in res:\n tmp_res.append( self.indices[i] )\n tmp_scores.append( k )\n\n\n # compute the difference with the difference\n diff = list(set(self.indices.values())-set(tmp_res))\n\n # shuffle to fill the rest of the list\n np.random.shuffle(diff)\n\n scores_diff = np.zeros( (len(diff,)) )\n\n final = []\n final_scores = []\n\n final.extend(tmp_res)\n final.extend(diff)\n\n final_scores.extend(tmp_scores)\n final_scores.extend(scores_diff)\n\n # remove extension for evaluation\n f = lambda x: x.split('.')[0]\n final = map(f, final)\n\n return final, final_scores",
"def comparator(self):\n return self.get_scores()",
"def correlation(self) -> List[float]:\n self.pearson_corr = self.sim_data[\"Human (mean)\"].corr(self.sim_data[\"assigned_sim\"], method=\"pearson\")\n self.spearman_corr = self.sim_data[\"Human (mean)\"].corr(self.sim_data[\"assigned_sim\"], method=\"spearman\")\n return [self.pearson_corr, self.spearman_corr]",
"def find_perfect_corr(df): \n corrMatrix = df.corr()\n corrMatrix.loc[:,:] = numpy.tril(corrMatrix.values, k = -1)\n already_in = set()\n result = []\n for col in corrMatrix:\n perfect_corr = corrMatrix[col][abs(numpy.round(corrMatrix[col],10)) == 1.00].index.tolist()\n if perfect_corr and col not in already_in:\n already_in.update(set(perfect_corr))\n perfect_corr.append(col)\n result.append(perfect_corr)\n toRemove = []\n for item in result:\n toRemove.append(item[1:(len(item)+1)])\n toRemove = sum(toRemove, [])\n return {'corrGroupings':result, 'toRemove':toRemove}",
"def __pick_clusters(self, mothur_results):\r\n # Sanity check\r\n if not 0 <= self.Params['Similarity'] <= 1:\r\n raise ValueError(\r\n 'Similarity threshold must be number between 0 and 1 '\r\n '(received %)' % similarity_threshold)\r\n\r\n # A lower mothur score means more otu's. To find otu's that\r\n # satisfy a similarity threshold of 0.9, we must find the\r\n # largest score less than or equal to (1 - 0.9 =) 0.1.\r\n score_threshold = 1 - self.Params['Similarity']\r\n\r\n my_score, my_otus = mothur_results.next()\r\n for score, otus in mothur_results:\r\n\r\n # Sanity check\r\n if score < my_score:\r\n raise ValueError(\r\n 'Mothur results not in ascending order. This is an error '\r\n 'in the Mothur application controller, and it should be '\r\n 'reported to the PyCogent developers.')\r\n\r\n if score <= score_threshold:\r\n my_score, my_otus = score, otus\r\n else:\r\n # Scores are only getting larger, so bail out now\r\n break\r\n return my_otus",
"def find_perfect_corr(df): \n corrMatrix = df.corr()\n corrMatrix.loc[:,:] = numpy.tril(corrMatrix.values, k = -1)\n already_in = set()\n result = []\n for col in corrMatrix:\n perfect_corr = corrMatrix[col][abs(numpy.round(corrMatrix[col],10)) == 1.00].index.tolist()\n if perfect_corr and col not in already_in:\n already_in.update(set(perfect_corr))\n perfect_corr.append(col)\n result.append(perfect_corr)\n toRemove = []\n for item in result:\n toRemove.append(item[1:(len(item)+1)])\n toRemove = sum(toRemove, [])\n return {'corrGroupings':result, 'toRemove':toRemove}",
"def compute_correlation_separability_score(self) -> float:\n sep_scores = pd.DataFrame.from_dict(self.separability_scores).to_numpy()\n sep_scores = minmax_scale(sep_scores)\n corrs = {}\n for tumor_pair in range(sep_scores.shape[1]):\n corr_sep_score = np.corrcoef(PATHO_PRIOR[:, tumor_pair], sep_scores[:, tumor_pair])\n corrs[tumor_pair] = corr_sep_score[1, 0]\n corrs['agg_with_risk'] = sum(\n np.array([val for _, val in corrs.items()]) *\n RISK\n ) \n corrs['agg'] = sum([val for key, val in corrs.items() if type(key)==int]) \n return corrs",
"def getCorrelationForSpecies(self, current_species, threshold):\n grouped = self.groupAllSamples()\n if self.corr_matrix is None or self.corr_signature is None or self.corr_signature[0] != grouped.iloc[:,len(self.tax_levels):-1].columns.tolist():\n corr_matrix = grouped.iloc[:,len(self.tax_levels):-2]\n corr_matrix.index = grouped[self.tax_level]\n self.corr_matrix = corr_matrix.transpose().corr(method='spearman')\n self.corr_signature = (list(corr_matrix.columns), self.tax_levels[0]) \n\n corr_matrix = self.corr_matrix.loc[:,current_species]\n text = 'spearman (rank) correlation >= ' + str(threshold) + ':\\n'\n\n corr_series = corr_matrix[abs(corr_matrix) >= threshold].sort_values(ascending=False)\n \n corr_matrix = grouped.iloc[:,len(self.tax_levels):-1]\n corr_matrix.index = grouped[self.tax_level]\n corr_list = []\n\n current_abundance = corr_matrix.loc[current_species,:corr_matrix.columns[-2]]\n list_index = []\n for name in corr_matrix.index:\n new_abundance = corr_matrix.loc[name,:][:-1]\n corr = '{0:.3}'.format(current_abundance.corr(new_abundance, method='spearman'))\n if corr != 'nan' and abs(float(corr)) >= threshold and current_species != name:\n corr_list.append('{0:.3}'.format(current_abundance.corr(new_abundance, method='spearman')))\n list_index.append(name)\n #rho, pval = spearmanr(current_abundance, new_abundance)\n #if rho != 'nan' and abs(float(corr)) >= threshold and current_species != name and pval <= 0.05:\n # #corr_list.append('{0:.3}'.format(rho))\n # #list_index.append(name)\n # print(name + '\\t' + str(rho) + '\\t' + str(pval))\n \n #for i in xrange(len(corr_list)):\n # if corr_list[i] != 'nan' and abs(float(corr_list[i])) >= threshold and current_species != corr_matrix.index[i]:\n # print(corr_matrix.index[i] + '\\t' + corr_list[i])\n corr_series = pd.Series(corr_list, index=list_index)\n return text, corr_series\n #return text, corr_series.map('{0:.3}'.format)",
"def calc_rocchio(original, relevant_vectors, nonrelevant_vectors):\n print('orig' + str(len(original)))\n if len(relevant_vectors) > 0: print('rv 1st len' + str(len(relevant_vectors[0])))\n if len(nonrelevant_vectors) > 0: print('nr 1st len' + str(len(nonrelevant_vectors[0])))\n rv_count = len(relevant_vectors)\n nr_count = len(nonrelevant_vectors)\n rv_sum = np.add.reduce(relevant_vectors)\n print('rv_sum' + str(rv_sum) + 'rv_count' + str(rv_count))\n nr_sum = np.add.reduce(nonrelevant_vectors)\n print('nr_sum' + str(nr_sum) + 'nr_count' + str(nr_count))\n updated_relevance = cg.ROCCHIO_ALPHA * original \\\n + cg.ROCCHIO_BETA * (1/rv_count if rv_count else 1) * rv_sum \\\n - cg.ROCCHIO_GAMMA * (1/nr_count if nr_count else 1) * nr_sum\n #only keep terms above minimum threshold (also serves to exclude negative values)\n print('before')\n print(updated_relevance[:40])\n updated_relevance = [0 if wgt < cg.ROCCHIO_MIN else wgt for wgt in updated_relevance]\n print('after')\n print(updated_relevance[:40])\n return updated_relevance",
"def correlation(data, method, caption):\n columns = list(data)\n coefficients = data.astype(float).corr(method=method)\n results = []\n for i in range(len(columns)):\n for j in range(i + 1, len(columns)):\n coefficient = coefficients[columns[i]][columns[j]]\n results.append((\n abs(coefficient), coefficient,\n columns[i] + ' x ' + columns[j]))\n print('# ' + caption + ', ' + method)\n for result in reversed(sorted(results)):\n abs_coefficient, coefficient, columns_pair = result\n print (coefficient, columns_pair)",
"def get_sorted_results(self):\n results = self.results.values()\n return sorted(results, key=lambda r: r.rank(), reverse=True)",
"def sorted_carnivores(self):\n fitness_dict = {carn: carn.fitness for carn in self.carnivores}\n sorted_tuples = dict(sorted(fitness_dict.items(), key=lambda x: x[1], reverse=True))\n\n return list(sorted_tuples.keys())",
"def eval_concreteness(scores: np.ndarray, word_pairs, num=100, gt_divisor=10, vecs_names=None, tablefmt='simple'):\n\n # Sort scores by first and second word's concreteness scores\n def print_conc(synset_agg, title):\n ids12 = wn_concreteness_for_pairs(word_pairs, synset_agg)\n # plot_scores(scores[ids1], gt_divisor, vecs_names, title=title)\n # plot_scores(scores[ids2], gt_divisor, vecs_names, title=title)\n # plot_scores(scores[ids12][:100], gt_divisor, vecs_names, title=title + ' - 100 least concrete')\n # plot_scores(scores[ids12][-100:], gt_divisor, vecs_names, title=title + ' - 100 most concrete')\n print(f'\\n-------- {num} least concrete - {title} -------\\n')\n print_correlations(scores[ids12][:num], name_pairs='gt', common_subset=False, tablefmt=tablefmt)\n print(f'\\n-------- {num} most concrete - {title} -------\\n')\n print_correlations(scores[ids12][-num:], name_pairs='gt', common_subset=False, tablefmt=tablefmt)\n\n # plots both for median concreteness of synsets and for the most concrete synset of words\n print_conc('median', 'Median synset concreteness')\n print_conc('most_conc', 'Most concrete synsets')",
"def abilityScores():\n\n scores_list = []\n\n for i in range(6):\n temp_list = []\n for j in range(4):\n temp_list.append(r.choice([1,2,3,4,5,6]))\n temp_list.sort()\n scores_list.append(temp_list[1]+temp_list[2]+temp_list[3])\n scores_list.sort()\n return scores_list",
"def recommend_from_scores(scores: List[List[float]], n: int) -> List[List[int]]:\n\n def top_idx(scores):\n return np.array(scores).argsort()[::-1][:n]\n\n return [top_idx(s) for s in scores]",
"def _select_matches(self, matches):\n matches = sorted(matches, key=lambda x: x.distance)\n matches = matches[:int(self._config['best_matches_percentage'] * len(matches))]\n return matches",
"def top_matches(prefs, person, n=5, similarity=sim_pearson):\n scores = [(similarity(prefs, person, other), other)\n for other in prefs if other != person]\n\n scores.sort()\n scores.reverse()\n return scores[0:n]",
"def _cont_cat_corr_features_anova(self, p_val = 0.01, subsamplesize = 100, p_seed = 0):\n \"\"\" Use ICC to define correlations, give box-plots for highly correlated pairs \"\"\"\n # TODO add option to do Bonferroni correction to adjust p-value depending on number of variables\n \n warnings.filterwarnings('ignore')\n # List of pairs along with correlation above threshold\n cont_cat_corr_list = []\n \n seed(p_seed)\n rand_vals = sample(range(self._n_rows), k=subsamplesize)\n \n # Search for the highly correlated pairs\n for i in self._cont_index_predictors: \n for j in self._cat_index_predictors:\n formula = self._dataset.columns[i] + \" ~ \" + self._dataset.columns[j] \n model_fit = ols(formula, data=self._dataset.iloc[rand_vals,:]).fit()\n anova_model = anova_lm(model_fit)\n p = anova_model.iloc[0,4]\n if p < p_val:\n cont_cat_corr_list.append([p,i,j]) #store correlation and columns index\n \n # Order variables by level of correlation \n s_cont_cat_corr_list = sorted(cont_cat_corr_list,key=lambda x: abs(x[0]))\n \n cont_cat_corr_features = []\n\n for v,i,j in s_cont_cat_corr_list:\n cont_cat_corr_features.append([self._dataset.columns[i],self._dataset.columns[j],v])\n \n return cont_cat_corr_features",
"def run_grouped_correlation(md_vals, otu_arrays, test, test_choices,\r\n pval_assignment_method, permutations=None):\r\n test_fn = test_choices[test]\r\n sample_sizes = map(len, md_vals)\r\n\r\n def _rho(otu_vals, md_vals):\r\n return test_fn(otu_vals, md_vals)\r\n # find the correlations. rhos is list of 1D arrays.\r\n rhos = []\r\n for i in range(len(md_vals)):\r\n rhos.append(apply_along_axis(_rho, 1, otu_arrays[i], md_vals[i]))\r\n pvals = []\r\n for i, group_rhos in enumerate(rhos):\r\n pvals_i = zeros(len(group_rhos))\r\n for j, rho in enumerate(group_rhos):\r\n pvals_i[j] = assign_correlation_pval(rho, sample_sizes[i],\r\n pval_assignment_method, permutations, test_fn, otu_arrays[\r\n i][j],\r\n md_vals[i])\r\n pvals.append(array(pvals_i))\r\n # calculate combined stats\r\n fisher_pvals = apply_along_axis(fisher, 0, array(pvals))\r\n fisher_rho_and_h = apply_along_axis(fisher_population_correlation, 0,\r\n array(rhos), sample_sizes)\r\n return (\r\n (rhos, pvals, fisher_pvals, fisher_rho_and_h[0], fisher_rho_and_h[1])\r\n )",
"def compute_cost_clarans(data, _cur_choice):\n # modified from that of CLARA\n total_cost = 0.0\n medoids = {}\n for idx in _cur_choice:\n medoids[idx] = []\n\n for i in list(data.index):\n choice = -1\n min_cost = np.inf\n for m in medoids:\n # fast_euclidean from CLARA\n tmp = np.linalg.norm(data.loc[m] - data.loc[i])\n if tmp < min_cost:\n choice = m\n min_cost = tmp\n\n medoids[choice].append(i)\n total_cost += min_cost\n # print(\"total_cost: \", total_cost)\n return total_cost, medoids",
"def calc_qcorr(self) -> Dict[int, float]:\n return self._calc_qcorr",
"def getSorteScoresFromScoreDict(queryRunDict):\n return list(sorted(queryRunDict.items(), key=lambda x: (x[1], x[0]), reverse=True))",
"def __get_score_ordered(scores, idx):\t\n\treturn [x[1][idx] for x in sorted(scores.items())]",
"def scatter_chart_score(self, grouped):\n score = np.abs(np.corrcoef(grouped.keys(), grouped.values)[0][1])\n if score > 0.3:\n score = 3\n return score",
"def get_scores(self):\n\n\t\tscores = np.dot(self.rankings, self.weights)\n\t\tranked_indices = np.argsort(scores)\n\t\tranked_sources = self.source_names[ranked_indices]\n\t\tranked_scores = sorted(scores)\n\t\tself.scores = {source:score for source, score in zip(ranked_sources, ranked_scores)}\n\n\t\treturn self.scores",
"def build_sorted_corelation(player_attributes):\n player_attributes_wo_na = player_attributes.dropna()\n player_attributes_corr = player_attributes_wo_na.corr()\n df_overall_rating_corr = player_attributes_corr[\"overall_rating\"]\n\n df_single_corr = pd.DataFrame(df_overall_rating_corr)\n df_single_corr = df_single_corr.sort_values(by=[\"overall_rating\"], ascending=True)\n\n # drop first three rows since we only care about positive correlation\n index_headers = list(df_single_corr[3:].index)\n plt.figure(figsize=(20, 10))\n plt.barh(\n y=index_headers,\n width=df_single_corr.overall_rating[3:],\n height=1,\n linewidth=0.5,\n )\n plt.show()",
"def compute_clf_based_correlation_score(stats, columns, col_name):\n full_col_data = columns[col_name]\n\n dt_clf = DecisionTreeClassifier()\n\n other_feature_names = []\n other_features = []\n for other_col_name in columns.columns:\n if other_col_name == col_name:\n continue\n\n other_feature_names.append(other_col_name)\n le = LabelEncoder()\n _stringified_col = list(map(str,columns[other_col_name]))\n le.fit(_stringified_col)\n other_features.append(list(le.transform(_stringified_col)))\n\n other_features_t = np.array(other_features, dtype=object).transpose()\n\n le = LabelEncoder()\n _stringified_col = list(map(str,full_col_data))\n le.fit(_stringified_col)\n y = le.transform(_stringified_col)\n dt_clf.fit(other_features_t,y)\n prediction_score = dt_clf.score(other_features_t,y)\n corr_scores = list(dt_clf.feature_importances_)\n highest_correlated_column = max(corr_scores)\n return {\n 'correlation_score': round(10 * (1 - prediction_score * highest_correlated_column))\n ,'highest_correlation': max(corr_scores)\n ,'most_correlated_column': other_feature_names[corr_scores.index(max(corr_scores))]\n ,'similarity_score_description':\"\"\"\n A high value for this score means that two of your columns are highly similar. This is done by trying to predict one column using the other via a simple DT.\n \"\"\"\n }"
] |
[
"0.5838652",
"0.55438906",
"0.5539825",
"0.5511799",
"0.5483375",
"0.54549277",
"0.54273504",
"0.5422278",
"0.5394091",
"0.536099",
"0.53258604",
"0.53121966",
"0.5309838",
"0.5289699",
"0.52868915",
"0.5261606",
"0.5250422",
"0.52314734",
"0.5225502",
"0.5213998",
"0.52039325",
"0.51935524",
"0.516331",
"0.5158532",
"0.5155567",
"0.515291",
"0.5152596",
"0.51503474",
"0.51387125",
"0.5133462"
] |
0.62526226
|
0
|
Gets a remote file of a bucket using a connection
|
def _get(conn, remote_file, bucket_name=BUCKET_NAME):
contents = None
try:
reply = conn.get(bucket_name, remote_file)
contents = reply.body
if reply.http_response.status != 200:
print 'Failed to fetch current_remote metadata'
contents = None
except:
contents = None
return contents
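The helper above targets the old Python 2 boto-style interface (conn.get, print statements). A rough modern counterpart with boto3 might look like the sketch below; the bucket name is a placeholder, and the broad exception handling simply mirrors the original's "return None on any failure" behaviour.

import boto3
from botocore.exceptions import BotoCoreError, ClientError

BUCKET_NAME = "my-example-bucket"  # placeholder, not the original module's constant

def get_remote_file(remote_file, bucket_name=BUCKET_NAME):
    s3 = boto3.client("s3")
    try:
        reply = s3.get_object(Bucket=bucket_name, Key=remote_file)
        return reply["Body"].read()
    except (BotoCoreError, ClientError):
        print("Failed to fetch current_remote metadata")
        return None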
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def s3_get(url, temp_file):\n s3_resource = boto3.resource(\"s3\")\n bucket_name, s3_path = split_s3_path(url)\n s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)",
"def s3_get(url, temp_file):\n s3_resource = boto3.resource(\"s3\")\n bucket_name, s3_path = split_s3_path(url)\n s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)",
"def download(bucket, key):\n validate_bucket_name(bucket)\n validate_key_name(key)\n client = get_client()\n\n # do a buffered download\n bytes_io = io.BytesIO()\n client.download_fileobj(bucket, key, bytes_io)\n\n # hope that stuff is not too big, and just return content\n return bytes_io.getvalue()",
"def __retrieve_from_bucket(fname):\n blob = BUCKET.blob(fname)\n json_data = json.loads(blob.download_as_string())\n return json_data",
"def remote_resource(cloud_config):\n remote_uri = 'http://storage.googleapis.com/{}/'.format(\n cloud_config.storage_bucket)\n\n return lambda path, tmpdir: fetch_gcs_resource(\n remote_uri + path.strip('/'), tmpdir)",
"def download(self, bucket, object, filename=None):\n service = self.get_conn()\n downloaded_file_bytes = service \\\n .objects() \\\n .get_media(bucket=bucket, object=object) \\\n .execute()\n\n # Write the file to local file path, if requested.\n if filename:\n write_argument = 'wb' if isinstance(downloaded_file_bytes, bytes) else 'w'\n with open(filename, write_argument) as file_fd:\n file_fd.write(downloaded_file_bytes)\n\n return downloaded_file_bytes",
"def s3_get(url, temp_file, proxies=None):\n\ts3_resource = boto3.resource (\"s3\", config=Config (proxies=proxies))\n\tbucket_name, s3_path = split_s3_path (url)\n\ts3_resource.Bucket (bucket_name).download_fileobj (s3_path, temp_file)",
"def get_file(cls, url, working_dir):\n if url.lower().startswith(\"s3://\"):\n return cls._s3_get_file(url)\n elif url.lower().startswith(\"http\"):\n return cls._http_get_file(url)\n else:\n return cls._fs_get_file(url, working_dir)",
"def fetch(iid):\n if AWS_CLIENT_CONFIG and BUCKET_NAME:\n try:\n s3 = boto3.resource('s3', **AWS_CLIENT_CONFIG)\n obj = s3.Bucket(BUCKET_NAME).Object(iid).get()\n if obj:\n return obj.get('Body')\n except botocore.exceptions.ClientError as e:\n logger.error(e)\n else:\n # get locally from temp dir (tests, local development)\n return get_temp_file(iid)\n return None",
"def get_s3_object(self, remote_s3_url):\n try:\n _file = tempfile.mkstemp()[1]\n parsed_s3_path = remote_s3_url.split(\"/\", 3) # s3://bucket-name/key\n remote_bucket = parsed_s3_path[2] # Bucket name\n remote_key = parsed_s3_path[3] # Key\n self.download_file(remote_bucket, remote_key, _file)\n return _file\n except Exception as e:\n message = {'FILE': __file__.split('/')[-1],\n 'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}\n self.logger.exception(message)\n raise",
"def get_s3_object(bucket, key_name, local_file):\n\n tracer.put_metadata('object', f's3://{bucket}/{key_name}')\n\n try:\n s3_resource.Bucket(bucket).download_file(key_name, local_file)\n result = 'ok'\n tracer.put_annotation('OBJECT_DOWNLOAD', 'SUCCESS')\n except botocore.exceptions.ClientError as e:\n tracer.put_annotation('OBJECT_DOWNLOAD', 'FAILURE')\n if e.response['Error']['Code'] == '404':\n result = f'Error: s3://{bucket}/{key_name} does not exist'\n else:\n result = f'Error: {str(e)}'\n\n return(result)",
"def _do_retrieve(bucket_name, key_path, number_retries=DEFAULT_S3_RETRIES):\n try:\n return conn.get_object(Bucket=bucket_name, Key=key_path, ResponseContentType='string')\n except Exception:\n if number_retries > 0:\n print(\"s3_retrieve failed with incomplete read, retrying on %s\" % key_path)\n return _do_retrieve(bucket_name, key_path, number_retries=number_retries - 1)\n raise",
"def get_file(self, file_name: str) -> BytesIO:\n fl = BytesIO()\n self.client.download_fileobj(self.bucket, file_name, fl)\n fl.seek(0)\n return fl",
"def download_blob(bucket_name, source_blob_name):\n\n storage_client = storage.Client()\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(source_blob_name)\n\n return blob",
"def ReadRemoteFile(url) -> bytes:\n local_url = download_util.DownloadResource(url)\n return file_util.OpenFile(local_url).read()",
"def get_bucket_file_url(bucket, key):\n\t#https://s3.amazonaws.com/link-checker/2018-05-27-235740.txt\n\tfile_url = \"https://s3.amazonaws.com/\" + bucket + \"/\" + key\n\treturn file_url",
"def _s3_get_file(url):\n try:\n return S3().get_contents_from_url(url)\n except Exception as e:\n raise ScrBaseException(\"Could not load file from {0}: {1}\".format(url, e))",
"def download_bucket(blob_name, path_to_file):\r\n blob = bucket.blob(blob_name)\r\n blob.download_to_filename(path_to_file)",
"def get_file_s3(bucket, key):\n \n client = boto3.client('s3')\n return client.get_object(Bucket=bucket, Key=key)['Body'].read().decode('utf-8')",
"def download_file(bucket_name: str, object_name: str, file_path: str):\n # pylint: disable=import-outside-toplevel\n from sotaque_brasileiro.utils import safe_getenv\n\n minio_client = Minio(\n safe_getenv(constants.MINIO_ENDPOINT.value),\n access_key=safe_getenv(constants.MINIO_ACCESS_KEY.value),\n secret_key=safe_getenv(constants.MINIO_SECRET_KEY.value),\n )\n minio_client.fget_object(bucket_name, object_name, file_path)",
"def download(self, bucket_name, file_name, file_path):\n\n self.client.download_file(bucket_name, file_name, file_path)",
"def download_blob(bucket_name, source_blob_name, destination_file_name):\n storage_client = storage.Client()\n try:\n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.blob(source_blob_name)\n \n blob.download_to_filename(destination_file_name)\n \n print('Blob {} downloaded to {}.'.format(\n source_blob_name,\n destination_file_name)) \n except:\n print(\"User does not have access to that bucket. Trying public link:\")\n gcs_url = 'https://%(bucket)s.storage.googleapis.com/%(file)s' % {'bucket':bucket_name, 'file':source_blob_name}\n urllib.urlretrieve(gcs_url, destination_file_name)\n print (\"Download complete\")",
"def get(self, src):\n tarName = \"%s/%s.tar\" % (self.bucket, src)\n if not self.client.exists(os.path.dirname(tarName)):\n return None\n # k = \"\"\n try:\n # with self.client.open(self.bucket + \"/\" + src) as f:\n # k = f.read()\n # copy_to_local(src: str, localdest: str, **kwargs)\n if not os.path.exists(os.path.dirname(tarName)):\n os.makedirs(os.path.dirname(tarName))\n self.client.copy_to_local(tarName, tarName)\n self.extractTar(os.path.dirname(tarName), tarName)\n except Exception as e:\n logger.info(\"Exception during get: %s\" % str(e))\n # return k",
"def get_blob(self, download_meta):\n bucket_name, key = self._get_bucket_key(download_meta)\n response = self.s3.get_object(Bucket=bucket_name,\n Key=key)\n return response['Body'].read().decode()",
"def get_remote_file(url, success=200, timeout=10):\n try:\n app.logger.info(\"GET: %s\" % url)\n auth = None\n res = requests.get(url, stream=True, timeout=timeout, auth=auth)\n if res.status_code == success:\n return res.headers.get('Content-Type', 'application/octet-stream'), res.raw.data\n except:\n pass\n return None, None",
"def _download_file(bucket: str, key: str) -> str:\n tmp_file_name = f\"/tmp/logs\"\n\n try:\n with open(tmp_file_name, \"wb\") as data:\n s3cl.download_fileobj(bucket, key, data)\n except Exception as e:\n print(type(e).__name__, e)\n f = open(tmp_file_name, \"w\")\n f.write(\"\")\n f.close()\n try:\n with gzip.open(tmp_file_name, mode=\"rt\") as f:\n x = f.read()\n return x\n except Exception as e:\n print(type(e).__name__, e, key)\n return \"\"",
"def get(self, bucket: str, object_name: str) -> bytes:\n raise NotImplementedError()",
"def scp_get_file(self, source_file, dest_file):\n self.scp_client.get(source_file, dest_file)",
"def get_image(filename):\n\n client.download_file(S3_BUCKET, filename, 'uploads/{}'.format(filename))",
"def get_remote_bytes(file_url) -> io.BytesIO:\n result = urlfetch.fetch(file_url)\n return io.BytesIO(result.content)"
] |
[
"0.68474334",
"0.68474334",
"0.6793478",
"0.6766179",
"0.6609071",
"0.6568017",
"0.6541442",
"0.6517729",
"0.6445589",
"0.64432293",
"0.64367956",
"0.6386914",
"0.6360486",
"0.630485",
"0.62856895",
"0.62814856",
"0.62789845",
"0.62708944",
"0.62665856",
"0.62540734",
"0.62386733",
"0.62182367",
"0.62100065",
"0.61916614",
"0.61643773",
"0.61561275",
"0.61527926",
"0.6138786",
"0.6133349",
"0.61329275"
] |
0.7393641
|
0
|
Put some contents into a remote_file of a bucket using connection conn. Optionally, the headers can be specified.
|
def _put(conn, remote_file, contents, bucket_name=BUCKET_NAME, headers=None):
error_msg = 'Failed to upload to %s' % remote_file
try:
reply = conn.put(bucket_name, remote_file,
S3.S3Object(contents), headers)
if reply.http_response.status != 200:
print error_msg
except:
print error_msg
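A hedged boto3 counterpart of the upload helper above: the original's S3.S3Object wrapper and free-form headers map roughly onto put_object keyword arguments. The bucket name and content-type handling are placeholders, not the original module's.

import boto3
from botocore.exceptions import BotoCoreError, ClientError

BUCKET_NAME = "my-example-bucket"  # placeholder, not the original module's constant

def put_remote_file(remote_file, contents, bucket_name=BUCKET_NAME, content_type=None):
    error_msg = "Failed to upload to %s" % remote_file
    s3 = boto3.client("s3")
    kwargs = {"Bucket": bucket_name, "Key": remote_file, "Body": contents}
    if content_type:
        kwargs["ContentType"] = content_type  # stand-in for the original's headers argument
    try:
        s3.put_object(**kwargs)
    except (BotoCoreError, ClientError):
        print(error_msg)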
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def upload_file(bucket, local_file_path, remote_destination_path):\n bucket = get_bucket(bucket)\n k = Key(bucket)\n k.key = remote_destination_path\n k.set_contents_from_filename(local_file_path)",
"def upload_file(conn, filename_local, filename_s3, gzip=False):\n\n filename_s3 = filename_s3.lstrip('./')\n\n file_descriptor = open(filename_local, 'rb')\n content = file_descriptor.read()\n\n content_type = _get_content_type(file_descriptor)\n headers = _get_headers(content_type)\n\n #should compress if the file is compressable and gzip is enabled\n can_be_gzipped = _file_can_be_compressed(filename_local)\n if gzip and can_be_gzipped:\n content = _compress_string(content)\n headers['Content-Length'] = str(len(content))\n headers['Content-Encoding'] = 'gzip'\n extension = mimetypes.guess_extension(content_type)\n #we should not overwrite the original file in the server.\n #We change extensions: style.css --> style.gz.css, for instance\n filename_s3 = filename_s3.rstrip(extension) + '.gz' + extension\n\n #if gzip is enabled and it is not compressable, don't upload nothing at all\n elif gzip and not can_be_gzipped:\n return\n\n #upload\n print 'Uploading %s to %s' % (filename_local, filename_s3)\n _put(conn, filename_s3, content, headers=headers)\n file_descriptor.close()",
"def send_file(self, fp, headers=None, cb=None, num_cb=10,\r\n query_args=None, chunked_transfer=False):\r\n provider = self.bucket.connection.provider\r\n\r\n def sender(http_conn, method, path, data, headers):\r\n http_conn.putrequest(method, path)\r\n for key in headers:\r\n http_conn.putheader(key, headers[key])\r\n http_conn.endheaders()\r\n if chunked_transfer:\r\n # MD5 for the stream has to be calculated on the fly, as\r\n # we don't know the size of the stream before hand.\r\n m = md5()\r\n else:\r\n fp.seek(0)\r\n\r\n save_debug = self.bucket.connection.debug\r\n self.bucket.connection.debug = 0\r\n # If the debuglevel < 3 we don't want to show connection\r\n # payload, so turn off HTTP connection-level debug output (to\r\n # be restored below).\r\n # Use the getattr approach to allow this to work in AppEngine.\r\n if getattr(http_conn, 'debuglevel', 0) < 3:\r\n http_conn.set_debuglevel(0)\r\n if cb:\r\n if chunked_transfer:\r\n # For chunked Transfer, we call the cb for every 1MB\r\n # of data transferred.\r\n cb_count = (1024 * 1024)/self.BufferSize\r\n self.size = 0\r\n elif num_cb > 2:\r\n cb_count = self.size / self.BufferSize / (num_cb-2)\r\n elif num_cb < 0:\r\n cb_count = -1\r\n else:\r\n cb_count = 0\r\n i = total_bytes = 0\r\n cb(total_bytes, self.size)\r\n l = fp.read(self.BufferSize)\r\n while len(l) > 0:\r\n if chunked_transfer:\r\n http_conn.send('%x;\\r\\n' % len(l))\r\n http_conn.send(l)\r\n http_conn.send('\\r\\n')\r\n else:\r\n http_conn.send(l)\r\n if cb:\r\n total_bytes += len(l)\r\n i += 1\r\n if i == cb_count or cb_count == -1:\r\n cb(total_bytes, self.size)\r\n i = 0\r\n if chunked_transfer:\r\n m.update(l)\r\n l = fp.read(self.BufferSize)\r\n if chunked_transfer:\r\n http_conn.send('0\\r\\n')\r\n http_conn.send('\\r\\n')\r\n if cb:\r\n self.size = total_bytes\r\n # Get the md5 which is calculated on the fly.\r\n self.md5 = m.hexdigest()\r\n else:\r\n fp.seek(0)\r\n if cb:\r\n cb(total_bytes, self.size)\r\n response = http_conn.getresponse()\r\n body = response.read()\r\n http_conn.set_debuglevel(save_debug)\r\n self.bucket.connection.debug = save_debug\r\n if ((response.status == 500 or response.status == 503 or\r\n response.getheader('location')) and not chunked_transfer):\r\n # we'll try again.\r\n return response\r\n elif response.status >= 200 and response.status <= 299:\r\n self.etag = response.getheader('etag')\r\n if self.etag != '\"%s\"' % self.md5:\r\n raise provider.storage_data_error(\r\n 'ETag from S3 did not match computed MD5')\r\n return response\r\n else:\r\n raise provider.storage_response_error(\r\n response.status, response.reason, body)\r\n\r\n if not headers:\r\n headers = {}\r\n else:\r\n headers = headers.copy()\r\n headers['User-Agent'] = UserAgent\r\n if self.base64md5:\r\n headers['Content-MD5'] = self.base64md5\r\n if self.storage_class != 'STANDARD':\r\n headers[provider.storage_class_header] = self.storage_class\r\n if headers.has_key('Content-Encoding'):\r\n self.content_encoding = headers['Content-Encoding']\r\n if headers.has_key('Content-Type'):\r\n self.content_type = headers['Content-Type']\r\n elif self.path:\r\n self.content_type = mimetypes.guess_type(self.path)[0]\r\n if self.content_type == None:\r\n self.content_type = self.DefaultContentType\r\n headers['Content-Type'] = self.content_type\r\n else:\r\n headers['Content-Type'] = self.content_type\r\n if not chunked_transfer:\r\n headers['Content-Length'] = str(self.size)\r\n headers['Expect'] = '100-Continue'\r\n headers = boto.utils.merge_meta(headers, 
self.metadata, provider)\r\n resp = self.bucket.connection.make_request('PUT', self.bucket.name,\r\n self.name, headers,\r\n sender=sender,\r\n query_args=query_args)\r\n self.handle_version_headers(resp, force=True)",
"def set_contents_from_file(self, fp, headers=None, replace=True,\r\n cb=None, num_cb=10, policy=None, md5=None,\r\n res_upload_handler=None):\r\n provider = self.bucket.connection.provider\r\n headers = headers or {}\r\n if policy:\r\n headers[provider.acl_header] = policy\r\n if hasattr(fp, 'name'):\r\n self.path = fp.name\r\n if self.bucket != None:\r\n if not md5:\r\n md5 = self.compute_md5(fp)\r\n else:\r\n # Even if md5 is provided, still need to set size of content.\r\n fp.seek(0, 2)\r\n self.size = fp.tell()\r\n fp.seek(0)\r\n self.md5 = md5[0]\r\n self.base64md5 = md5[1]\r\n if self.name == None:\r\n self.name = self.md5\r\n if not replace:\r\n k = self.bucket.lookup(self.name)\r\n if k:\r\n return\r\n if res_upload_handler:\r\n res_upload_handler.send_file(self, fp, headers, cb, num_cb)\r\n else:\r\n # Not a resumable transfer so use basic send_file mechanism.\r\n self.send_file(fp, headers, cb, num_cb)",
"def ingest_httpfile(self, url, dest, name=None, metadata={}, mimetype='application/octet-stream'):\n parsed = urlparse(url)\n if name is None:\n name = basename(parsed.path)\n try:\n tempfilename = download_tempfile(url)\n logger.debug(\"Downloaded file to: \"+tempfilename)\n with closing(open(tempfilename, 'rb')) as f:\n res = get_client().put(dest + name,\n f,\n metadata=metadata,\n mimetype=mimetype)\n if not res.ok():\n raise IOError(str(res))\n cdmi_info = res.json()\n logger.debug(\"put success for {0}\".format(json.dumps(cdmi_info)))\n except IOError as e:\n raise self.retry(exc=e)\n finally:\n os.remove(tempfilename)",
"def set_contents_from_stream(self, fp, headers=None, replace=True,\r\n cb=None, num_cb=10, policy=None,\r\n reduced_redundancy=False, query_args=None):\r\n\r\n provider = self.bucket.connection.provider\r\n if not provider.supports_chunked_transfer():\r\n raise BotoClientError('%s does not support chunked transfer'\r\n % provider.get_provider_name())\r\n\r\n # Name of the Object should be specified explicitly for Streams.\r\n if not self.name or self.name == '':\r\n raise BotoClientError('Cannot determine the destination '\r\n 'object name for the given stream')\r\n\r\n if headers is None:\r\n headers = {}\r\n if policy:\r\n headers[provider.acl_header] = policy\r\n\r\n # Set the Transfer Encoding for Streams.\r\n headers['Transfer-Encoding'] = 'chunked'\r\n\r\n if reduced_redundancy:\r\n self.storage_class = 'REDUCED_REDUNDANCY'\r\n if provider.storage_class_header:\r\n headers[provider.storage_class_header] = self.storage_class\r\n\r\n if self.bucket != None:\r\n if not replace:\r\n k = self.bucket.lookup(self.name)\r\n if k:\r\n return\r\n self.send_file(fp, headers, cb, num_cb, query_args,\r\n chunked_transfer=True)",
"def upload_file_handle(\n self,\n bucket: str,\n object_name: str,\n src_file_handle: typing.BinaryIO):\n raise NotImplementedError()",
"def test_put_object_from_file(self):\n self.get_file(20)\n response = self.bos.put_object_from_file(self.BUCKET, self.KEY, self.FILENAME)\n self.check_headers(response, [\"etag\"])",
"def upload_blob(self, bucket_name, file_name, contents):\n\n bucket = self.storage_client.bucket(bucket_name)\n blob = bucket.blob(file_name)\n blob.upload_from_string(contents)\n print(\n \"File {} uploaded to bucket {} as file {}.\".format(\n file_name, bucket_name, file_name\n )\n )",
"def set_contents_from_file(self, fp, headers=None, replace=True,\r\n cb=None, num_cb=10, policy=None, md5=None,\r\n reduced_redundancy=False, query_args=None,\r\n encrypt_key=False):\r\n provider = self.bucket.connection.provider\r\n if headers is None:\r\n headers = {}\r\n if policy:\r\n headers[provider.acl_header] = policy\r\n if encrypt_key:\r\n headers[provider.server_side_encryption_header] = 'AES256'\r\n\r\n if reduced_redundancy:\r\n self.storage_class = 'REDUCED_REDUNDANCY'\r\n if provider.storage_class_header:\r\n headers[provider.storage_class_header] = self.storage_class\r\n # TODO - What if provider doesn't support reduced reduncancy?\r\n # What if different providers provide different classes?\r\n if hasattr(fp, 'name'):\r\n self.path = fp.name\r\n if self.bucket != None:\r\n if not md5:\r\n md5 = self.compute_md5(fp)\r\n else:\r\n # even if md5 is provided, still need to set size of content\r\n fp.seek(0, 2)\r\n self.size = fp.tell()\r\n fp.seek(0)\r\n self.md5 = md5[0]\r\n self.base64md5 = md5[1]\r\n if self.name == None:\r\n self.name = self.md5\r\n if not replace:\r\n k = self.bucket.lookup(self.name)\r\n if k:\r\n return\r\n self.send_file(fp, headers, cb, num_cb, query_args)",
"def upload(filename, bucket):\n k = Key(bucket)\n k.key = uuid.uuid1().hex\n print \"Uploading batch to {}, key: {}...\".format(bucket.name, k.key)\n k.set_contents_from_filename(filename, reduced_redundancy=True)\n print \" Done.\"\n \n\n\n bucket = openBucket(dest)",
"def upload_file(\n self, bucket_id: uplink.Path, filename: uplink.Path, file: uplink.Body\n ):\n pass",
"def _add_files(self, category, files, session, bucket=None):\n\n with session[category].make_commit('master') as commit:\n for filename, content in files.items():\n if bucket:\n commit.put_file_url(\n filename,\n 's3://%s/%s' % (bucket, content)\n )\n else:\n commit.put_file_bytes(\n filename,\n content\n )",
"def cp(self, source: str, filename: str) -> None:\n\n now = datetime.datetime.utcnow()\n timestamp = now.strftime('%a, %d %b %Y %H:%M:%S GMT')\n headers = [\n ('Connection', 'keep-alive'),\n ('Content-Length', '0'),\n ('Date', timestamp),\n ('Host', '%s.s3.amazonaws.com' % self.bucket),\n ('x-amz-content-sha256', _EMPTY_SHA256_HASH),\n ('x-amz-copy-source', '/%s%s' % (self.bucket, source)),\n ]\n signed_headers = ';'.join(header[0].lower() for header in headers)\n canonical_request = 'PUT\\n%s\\n\\n%s\\n\\n%s\\n%s' % (filename, '\\n'.join(\n ('%s:%s' % (header[0].lower(), header[1])\n for header in headers)), signed_headers, _EMPTY_SHA256_HASH)\n logging.debug('canonical request %r',\n canonical_request.encode('utf-8'))\n string_to_sign = 'AWS4-HMAC-SHA256\\n%s\\n%s\\n%s' % (\n timestamp, self.scope,\n hashlib.sha256(canonical_request.encode('utf-8')).hexdigest())\n logging.debug('string to sign %r', string_to_sign.encode('utf-8'))\n\n signature = hmac.new(self.signing_key,\n string_to_sign.encode('utf-8'),\n digestmod='sha256').hexdigest()\n headers.append((\n 'Authorization',\n 'AWS4-HMAC-SHA256 Credential=%s/%s,SignedHeaders=%s,Signature=%s' %\n (self.aws_access_key, self.scope, signed_headers, signature)))\n if not self.conn:\n self.conn = http.client.HTTPSConnection('%s.s3.amazonaws.com' %\n self.bucket)\n try:\n self.conn.request('PUT', filename, headers=dict(headers))\n res = self.conn.getresponse()\n payload = res.read()\n except (http.client.BadStatusLine, http.client.ResponseNotReady,\n http.client.CannotSendRequest):\n self.conn.close()\n raise\n if res.status != 200:\n raise Exception(payload.decode('utf-8'))",
"def _upload(auth_http, project_id, bucket_name, file_path, object_name, acl):\n with open(file_path, 'rb') as f:\n data = f.read()\n content_type, content_encoding = mimetypes.guess_type(file_path)\n\n headers = {\n 'x-goog-project-id': project_id,\n 'x-goog-api-version': API_VERSION,\n 'x-goog-acl': acl,\n 'Content-Length': '%d' % len(data)\n }\n if content_type: headers['Content-Type'] = content_type\n if content_type: headers['Content-Encoding'] = content_encoding\n\n try:\n response, content = auth_http.request(\n 'http://%s.storage.googleapis.com/%s' % (bucket_name, object_name),\n method='PUT',\n headers=headers,\n body=data)\n except httplib2.ServerNotFoundError, se:\n raise Error(404, 'Server not found.')\n\n if response.status >= 300:\n raise Error(response.status, response.reason)\n\n return content",
"def put(self, path: str, filename: str) -> None:\n\n payload_hash, content_md5, length = _hash(path)\n\n now = datetime.datetime.utcnow()\n timestamp = now.strftime('%Y%m%dT%H%M%SZ')\n headers = [\n ('Connection', 'keep-alive'),\n ('Content-Length', str(length)),\n ('Content-MD5', content_md5),\n ('Content-Type', 'application/zip'),\n ('Date', now.strftime('%a, %d %b %Y %H:%M:%S GMT')),\n ('Host', '%s.s3.amazonaws.com' % self.bucket),\n ('x-amz-content-sha256', payload_hash),\n ('x-amz-date', timestamp),\n ]\n signed_headers = ';'.join(header[0].lower() for header in headers)\n canonical_request = 'PUT\\n%s\\n\\n%s\\n\\n%s\\n%s' % (filename, '\\n'.join(\n ('%s:%s' % (header[0].lower(), header[1])\n for header in headers)), signed_headers, payload_hash)\n logging.debug('canonical request %r',\n canonical_request.encode('utf-8'))\n string_to_sign = 'AWS4-HMAC-SHA256\\n%s\\n%s\\n%s' % (\n timestamp, self.scope,\n hashlib.sha256(canonical_request.encode('utf-8')).hexdigest())\n logging.debug('string to sign %r', string_to_sign.encode('utf-8'))\n\n signature = hmac.new(self.signing_key,\n string_to_sign.encode('utf-8'),\n digestmod='sha256').hexdigest()\n headers.append((\n 'Authorization',\n 'AWS4-HMAC-SHA256 Credential=%s/%s,SignedHeaders=%s,Signature=%s' %\n (self.aws_access_key, self.scope, signed_headers, signature)))\n with open(path, 'rb') as file_stream:\n if not self.conn:\n self.conn = http.client.HTTPSConnection('%s.s3.amazonaws.com' %\n self.bucket)\n try:\n self.conn.request('PUT',\n filename,\n file_stream,\n headers=dict(headers))\n res = self.conn.getresponse()\n payload = res.read()\n except (http.client.BadStatusLine, http.client.ResponseNotReady,\n http.client.CannotSendRequest):\n self.conn.close()\n raise\n if res.status != 200:\n raise Exception(payload.decode('utf-8'))",
"def upload_to_bucket(bucket_name, path_to_source_file, upload_file_name):\r\n\r\n try:\r\n # initialize client & get blob\r\n _, _, blob = create_client(bucket_name, upload_file_name)\r\n\r\n # set the path to source file\r\n blob.upload_from_filename(path_to_source_file)\r\n \r\n except Exception as err:\r\n raise err\r\n sys.exit(1)\r\n \r\n else:\r\n print(f\"upload file '{path_to_source_file}' succeed\")\r\n\r\n return None",
"def upload_file_to_icos(icos_obj, bucket: str, local_file_name: str, key: str) -> None:\r\n try:\r\n icos_obj.upload_file(Filename=local_file_name, Bucket=bucket, Key=key)\r\n except Exception as e:\r\n print(Exception, e)\r\n else:\r\n print('File `{}` uploaded to ICOS as `{}`.'.format(local_file_name, key))",
"def store_s3_contents ( s3_conn, bucket_name, key_name, key_contents = None, key_contents_filename = None ) :\n bucket = s3_conn.get_bucket( bucket_name )\n key = boto.s3.key.Key( bucket )\n key.key = key_name\n if ( key_contents_filename ) :\n key.set_contents_from_filename( key_contents_filename )\n else :\n key.set_contents_from_string( key_contents )",
"def _s3cmd_put(src_path, bucket):\n if not os.path.exists(env.s3cmd_cfg):\n abort(\"Could not find 's3cmd.cfg' repository at '%(s3cmd_cfg)s'.\" % env)\n\n with lcd(env.sites_path):\n local('fablib/bin/s3cmd --config=%s put' \\\n ' --rexclude \".*/\\.[^/]*$\"' \\\n ' --acl-public' \\\n ' --add-header=\"Cache-Control:max-age=300\"' \\\n ' -r %s/ s3://%s/' \\\n % (env.s3cmd_cfg, src_path, bucket))",
"def put_object(self, account, container, object, content):#put a file to server\n \n pass",
"def upload_file(self, instance, local_obj, remote_file):\n client = self.connect(instance)\n try:\n sftp = client.open_sftp()\n try:\n self._send_file(sftp, local_obj, remote_file)\n finally:\n sftp.close()\n finally:\n client.close()",
"def upload_file(self, keyUrl='', body='', ContentType='', bucket=None):\n \n if bucket is None:\n bucket = self.AWS_S3_BUCKET\n \n #Verificamos si existe body\n if body is None:\n body=''\n \n try:\n self.get_s3_client().put_object(Bucket=bucket, Key=keyUrl, Body=body, ACL='public-read', ContentType=ContentType)\n return True\n \n except ClientError as e:\n return False",
"def putFile(filename, file = None, localFilename = None):\n if not file and not filename:\n print(\"Please pass a valid file or filename\")\n\n if filename and not file:\n file = open(filename, \"rb\")\n\n print(\"filename: {}, file: {}\".format(filename, file))\n r = requests.put(\"{host}/{filename}\".format(host = host, filename = filename), files = {\"file\": file})\n return (r.ok, r.status_code, r.text)",
"def upload(self, remote, local, force = False):\n fl = self.list([ remote ])\n if force == False and remote in fl:\n remote_hash = fl[remote]\n h = hashlib.sha256()\n commonl.hash_file(h, local)\n if remote_hash == h.hexdigest():\n # remote hash is the same, no need to upload\n return\n\n with io.open(local, \"rb\") as inf:\n self.target.ttbd_iface_call(\"store\", \"file\", method = \"POST\",\n file_path = remote,\n files = { 'file': inf })",
"def test_put_object_from_file_user_headers(self):\n\n user_headers = {\"Cache-Control\":\"private\", \n \"Content-Disposition\":\"attachment; filename=\\\"abc.txt\\\"\", \n \"Expires\":\"123456\"}\n\n self.get_file(5)\n response = self.bos.put_object_from_file(bucket=self.BUCKET,\n key=\"test_put_file_user_headers\",\n file_name=self.FILENAME,\n user_headers=user_headers)\n self.check_headers(response)\n\n response = self.bos.get_object_meta_data(bucket_name=self.BUCKET, \n key='test_put_file_user_headers')\n self.assertEqual(response.metadata.expires, \"123456\")\n self.assertEqual(response.metadata.content_disposition, 'attachment; filename=\"abc.txt\"')\n self.assertEqual(response.metadata.cache_control, 'private')",
"def upload(filename, bucket):\n print(\"Uploading {} to S3\".format(filename.lower().replace('_', '-')))\n url = \"https://s3.ca-central-1.amazonaws.com/{}/{}\".format(bucket,\n filename.lower().replace('_', '-'))\n with open('{}/{}'.format(WORK_DIR, filename), 'rb') as data:\n requests.put(url, data=data)",
"def get_file(self, fp, headers=None, cb=None, num_cb=10,\r\n torrent=False, version_id=None, override_num_retries=None,\r\n response_headers=None):\r\n if cb:\r\n if num_cb > 2:\r\n cb_count = self.size / self.BufferSize / (num_cb-2)\r\n elif num_cb < 0:\r\n cb_count = -1\r\n else:\r\n cb_count = 0\r\n i = total_bytes = 0\r\n cb(total_bytes, self.size)\r\n save_debug = self.bucket.connection.debug\r\n if self.bucket.connection.debug == 1:\r\n self.bucket.connection.debug = 0\r\n\r\n query_args = []\r\n if torrent:\r\n query_args.append('torrent')\r\n # If a version_id is passed in, use that. If not, check to see\r\n # if the Key object has an explicit version_id and, if so, use that.\r\n # Otherwise, don't pass a version_id query param.\r\n if version_id is None:\r\n version_id = self.version_id\r\n if version_id:\r\n query_args.append('versionId=%s' % version_id)\r\n if response_headers:\r\n for key in response_headers:\r\n query_args.append('%s=%s' % (key, response_headers[key]))\r\n query_args = '&'.join(query_args)\r\n self.open('r', headers, query_args=query_args,\r\n override_num_retries=override_num_retries)\r\n for bytes in self:\r\n fp.write(bytes)\r\n if cb:\r\n total_bytes += len(bytes)\r\n i += 1\r\n if i == cb_count or cb_count == -1:\r\n cb(total_bytes, self.size)\r\n i = 0\r\n if cb:\r\n cb(total_bytes, self.size)\r\n self.close()\r\n self.bucket.connection.debug = save_debug",
"def _put(self, src_fname, dst_fname):\n logging.info('Transferring file %s to %s', src_fname, self._ip_addr)\n sftp_cli = self._get_sftp_client()\n if sftp_cli is None:\n raise Exception('Not supported without ssh.')\n return sftp_cli.put(src_fname, dst_fname)",
"def put_object(self, bucket, key, local_file_path=None, file_bytes=None) -> None:\n def upload_to_s3(byte_array):\n self.resource.Object(bucket, key).put(Body=byte_array)\n\n if file_bytes:\n upload_to_s3(file_bytes)\n else:\n with open(local_file_path, 'rb') as local_file:\n self.resource.Object(bucket, key).put(Body=local_file)"
] |
[
"0.6534801",
"0.6233578",
"0.6144909",
"0.6129894",
"0.5970255",
"0.59591484",
"0.59422994",
"0.5909662",
"0.5902494",
"0.58096933",
"0.57955354",
"0.5794321",
"0.57661766",
"0.5737979",
"0.57304716",
"0.5720829",
"0.5701128",
"0.5676588",
"0.56659216",
"0.56574786",
"0.56344366",
"0.56267023",
"0.5618739",
"0.5609733",
"0.5602718",
"0.56025225",
"0.5596587",
"0.559401",
"0.5587667",
"0.5583829"
] |
0.7733097
|
0
|
Guess the content_type by using its file descriptor.
|
def _get_content_type(file_descriptor):
    content_type = mimetypes.guess_type(file_descriptor.name)[0]
    if not content_type:
        content_type = 'text/plain'
    return content_type
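
A minimal usage sketch for the helper above, assuming the _get_content_type function just shown is in scope and that mimetypes is imported at module level (the snippet calls it but does not import it); the temporary .css file is hypothetical and only used so the guess resolves to a text type:

import mimetypes
import os
import tempfile

fd, path = tempfile.mkstemp(suffix='.css')  # hypothetical throwaway file
os.close(fd)
with open(path, 'rb') as f:
    print(_get_content_type(f))  # 'text/css' with the standard mimetypes table
os.remove(path)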
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_content_type(filename):\n return mimetypes.guess_type(filename)[0] or 'application/octet-stream'",
"def guess_content_type ( self, path_info ) :\n _type, _enc = guess_type ( path_info )\n return _type",
"def guess_content_type(filename):\n return mimetypes.guess_type(filename)[0]",
"def guess_type(content):\n global mimeLock\n global mimeInitialized\n\n if not mimeInitialized:\n with mimeLock:\n if not mimeInitialized:\n mimetypes.init()\n mimeInitialized = True\n guessed = mimetypes.guess_type(content)\n\n if guessed[1] is None:\n guessed = (guessed[0], \"\")\n\n return guessed",
"def GetContentType(filename):\r\n return mimetypes.guess_type(filename)[0] or 'application/octet-stream'",
"def GetContentType(filename):\n return mimetypes.guess_type(filename)[0] or 'application/octet-stream'",
"def get_content_type(ct):\n content_type = ct\n\n if ct == \"csv\":\n content_type = \"text/csv\"\n elif ct == \"json\":\n content_type = \"application/json\"\n\n return content_type",
"def guess_type_from_content(file_obj):\n first_bytes = file_obj.read(2)\n if first_bytes == b\"PK\":\n filetype = \"xlsx\"\n else:\n content = file_obj.read()\n if b\"\\t\" in content:\n filetype = \"tsv\"\n else:\n filetype = \"csv\"\n return filetype",
"def guess_mimetype(filename):\n m, encoding = mimetypes.guess_type(filename)\n if encoding:\n m = ENCODING_MIMETYPES.get(encoding, None)\n return m or \"application/octet-stream\"",
"def detect_content_type(self, path=None, payload=None):\n\n f = file_path(path, payload)\n switches = [\"-d\", f]\n result = self._command_template(switches).lower()\n return result, path, f",
"def get_content_type(file_path):\n\n try:\n magic_obj = magic.Magic(mime=True)\n magic_obj.file = magic_obj.from_file\n except AttributeError as e:\n magic_obj = magic.open(magic.MAGIC_MIME_TYPE)\n magic_obj.load()\n\n content_type = magic_obj.file(file_path)\n return content_type",
"def _guess_mimetype(self, file):\n if not is_exe_in_path('file'):\n return self.DEFAULT_MIMETYPE\n\n # The browser didn't know what this was, so we'll need to do\n # some guess work. If we have 'file' available, use that to\n # figure it out.\n p = subprocess.Popen(['file', '--mime-type', '-b', '-'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n stdin=subprocess.PIPE)\n\n # Write the content from the file until file has enough data to\n # make a determination.\n for chunk in file.chunks():\n try:\n p.stdin.write(chunk)\n except IOError:\n # file closed, so we hopefully have an answer.\n break\n\n p.stdin.close()\n ret = p.wait()\n\n if ret == 0:\n mimetype = p.stdout.read().strip()\n else:\n mimetype = None\n\n # Reset the read position so we can properly save this.\n file.seek(0)\n\n return mimetype or self.DEFAULT_MIMETYPE",
"def content_type(self):\n return self.environ.get('CONTENT_TYPE') or 'application/octet-stream'",
"def _check_url_file_type(headers: Dict[str, str]) -> Optional[str]:\n content_type = headers.get(\"content-type\", \"\").lower()\n file_type = None\n\n for extension in SUPPORTED_MIME_TYPES.keys():\n for mime_type in SUPPORTED_MIME_TYPES.get(extension, []):\n if mime_type in content_type:\n file_type = extension\n break\n\n return file_type",
"def best_match_content_type(self):\n # First lookup http request path\n parts = self.path.rsplit('.', 1)\n if len(parts) > 1:\n _format = parts[1]\n if _format in ['json', 'xml']:\n return 'application/{0}'.format(_format)\n\n #Then look up content header\n type_from_header = self.get_content_type()\n if type_from_header:\n return type_from_header\n ctypes = ['application/json', 'application/xml']\n\n #Finally search in Accept-* headers\n bm = self.accept.best_match(ctypes)\n return bm or 'application/json'",
"def content_type(self):\n return self._headers.get(\"content-type\")",
"def get_ctype(f):\n return mimetypes.guess_type(f)[0]",
"def getContentType(content):\n\n xml = 'application/xml'\n\n if isXML(content):\n return xml\n elif content == '':\n return xml\n elif content is None:\n return xml\n else:\n return 'application/octet-stream'",
"def content_type(self) -> str:\n raw = self._headers.get(hdrs.CONTENT_TYPE) # type: ignore[attr-defined]\n if self._stored_content_type != raw:\n self._parse_content_type(raw)\n return self._content_type # type: ignore[return-value]",
"def get_mime_type(file):\n initial_pos = file.tell()\n file.seek(0)\n mime_type = magic.from_buffer(file.read(2048), mime=True)\n file.seek(initial_pos)\n return mime_type",
"def content_type(self):\n return self.guess_content_type(self.store_key)",
"def get_content_type(self, headers):\n if headers:\n for h, val in headers.items():\n if h.lower().strip() == 'content-type':\n # As it turns out, content-type often appears with some\n # additional values e.g \"text/css; charset=utf8\" so we want\n # just 'text/css' rather than the whole string\n return val[0].split(\";\")[0]\n return \"\"",
"def mimetype(self) -> 'Mimetype':\n if self._mimetype:\n # We take the mimetype reported in the dataset as authoritative.\n return Mimetype(self._mimetype)\n # If no mimetype is specified explicitly, we fall back to mimetype detection mechanisms:\n if self.scheme in ['file', 'http', 'https']:\n mt, _ = mimetypes.guess_type(self.parsed_url.path)\n if mt:\n return Mimetype(mt)\n if self.scheme == 'data':\n mt, _, data = self.parsed_url.path.partition(',')\n if mt.endswith(';base64'):\n mt = mt.replace(';base64', '').strip()\n if mt:\n return Mimetype(mt)\n # There's an explicit default mimetype for data URLs!\n return Mimetype('text/plain;charset=US-ASCII')\n if self.scheme in ['http', 'https']:\n res = urllib.request.urlopen(urllib.request.Request(self.url, method=\"HEAD\"))\n mt = res.headers.get('Content-Type')\n if mt:\n return Mimetype(mt)\n return Mimetype('application/octet-stream')",
"def guess_mimetype(fn, default=\"application/octet-stream\"):\n if \".\" not in fn:\n return default\n bfn, ext = fn.lower().rsplit(\".\", 1)\n if ext == \"jpg\": ext = \"jpeg\"\n return mimetypes.guess_type(bfn + \".\" + ext)[0] or default",
"def mime_type(filename):\n mtype, encoding = guess_type(filename, False)\n if encoding is None:\n return mtype or \"application/octet-stream\"\n elif encoding == \"gzip\":\n # application/gzip is defined by RFC 6713\n return \"application/gzip\"\n # Note that there is a \"+gzip\" MIME structured syntax suffix specified\n # in an RFC draft that may one day mean the correct code is:\n # return mtype + '+gzip'\n else:\n return \"application/x-\" + encoding",
"def content_type(self):\n return self._headers['CONTENT-TYPE']",
"def get_content_type(self):\n if hasattr(self, '_content_type'):\n return self._content_type\n mimetype = None\n querystring_mimetype = self.request.get('mimetype')\n acceptheader = self.request.getHeader('Accept')\n\n if querystring_mimetype and querystring_mimetype in self.content_types:\n mimetype = querystring_mimetype\n else:\n querystring_error = 'No acceptable mimetype in QUERY_STRING: {0}'.format(querystring_mimetype)\n if acceptheader:\n mimetype = self.content_types.negotiate_accept_header(acceptheader)\n if not mimetype:\n acceptheader_error = 'No acceptable mimetype in ACCEPT header: {0}'.format(acceptheader)\n raise CouldNotDetermineContentType(querystring_error=querystring_error,\n acceptheader_error=acceptheader_error,\n acceptable_mimetypes=self.content_types.get_mimetypelist())\n content_type = self.content_types[mimetype]\n self._content_type = content_type\n return content_type",
"def file_type(filename, stream=False):\n magic_dict = {\"\\x1f\\x8b\\x08\": \"gz\",\n \"\\x42\\x5a\\x68\": \"bz2\",\n \"\\x50\\x4b\\x03\\x04\": \"zip\",\n b\"\\x50\\x4b\\x03\\x04\": \"zip\",\n \"PK\\x03\\x04\": \"zip\",\n b\"PK\\x03\\x04\": \"zip\",\n }\n\n max_len = max(len(x) for x in magic_dict)\n if not stream:\n with open(filename) as f:\n file_start = f.read(max_len)\n for magic, filetype in magic_dict.items():\n if file_start.startswith(magic):\n return filetype\n else:\n for magic, filetype in magic_dict.items():\n if filename[:len(magic)] == magic:\n return filetype\n\n return None",
"def get_file_type(file_str):\n process_list = [\"file\", \"--mime-type\", file_str]\n p = subprocess.Popen(process_list, stdout=subprocess.PIPE)\n file_type, err = p.communicate()\n\n return file_type.decode(\"utf-8\")",
"def get_media_type(f):\n tipe = mimetypes.guess_type(f)\n if tipe[0]:\n if \"image\" in tipe[0]:\n return \"image\"\n elif \"video\" in tipe[0]:\n return \"video\"\n elif \"audio\" in tipe[0]:\n return \"audio\""
] |
[
"0.7407128",
"0.74055415",
"0.7343658",
"0.7118253",
"0.70626557",
"0.7053132",
"0.70111525",
"0.6947122",
"0.68888956",
"0.68175477",
"0.6781097",
"0.67605287",
"0.67187476",
"0.6655244",
"0.6640195",
"0.66329396",
"0.660614",
"0.66005784",
"0.65136355",
"0.6447863",
"0.6439395",
"0.64054847",
"0.6404971",
"0.64015704",
"0.63843954",
"0.63714516",
"0.63662547",
"0.6356395",
"0.632482",
"0.6318174"
] |
0.7975829
|
0
|
Asserts whether a given file (with name filename) can be compressed. content_type is optional and can speed up the assertion. Should return True if the file is a text type (CSS/JS).
|
def _file_can_be_compressed(filename):
    content_type = ''
    with open(filename, 'rb') as f:
        content_type = _get_content_type(f)
    return content_type in TEXT_TYPES
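
A hedged sketch of exercising the check above: TEXT_TYPES is referenced but not defined in this row, so the tuple below is an assumption, as is the temporary file; the sketch also presumes _file_can_be_compressed, _get_content_type and an import of mimetypes are all in scope as shown earlier.

import mimetypes
import os
import tempfile

TEXT_TYPES = ('text/css', 'text/html', 'text/plain')  # assumed contents of the constant

fd, path = tempfile.mkstemp(suffix='.css')
os.close(fd)
print(_file_can_be_compressed(path))  # True: a .css name guesses to 'text/css'
os.remove(path)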
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def isgzip(filename):\n magic_number = b'\\x1f\\x8b\\x08'\n with open(filename, 'rb') as f:\n file_start = f.read(len(magic_number))\n\n if magic_number == file_start:\n return True\n return False",
"def is_archive(afile):\n return file_ext(os.path.basename(afile)) in ARCHIVE_COMPRESS_FORMATS",
"def test_gzip(handler,config):\r\n if not config.gzip:\r\n return False\r\n if not gzip_support:\r\n return False\r\n accept_encoding = handler.headers.get('accept-encoding','').split(',')\r\n accept_encoding = [ x.strip() for x in accept_encoding ]\r\n ctype = handler.resp_headers[\"Content-type\"]\r\n # if gzip is supported by the user agent,\r\n # and if the option gzip in the configuration file is set, \r\n # and content type is text/ or javascript, \r\n # set Content-Encoding to 'gzip' and return True\r\n if 'gzip' in accept_encoding and \\\r\n ctype and (ctype.startswith('text/') or \r\n ctype=='application/x-javascript'):\r\n return True\r\n return False",
"def is_zip(filepath):\n\treturn os.path.splitext(filepath)[1] == '.gz'",
"def identify_compression(file_path: str) -> Optional[str]:\r\n sign_dict = {\r\n b\"\\x1f\\x8b\\x08\": \"gz\",\r\n b\"\\x42\\x5a\\x68\": \"bz2\",\r\n b\"\\x50\\x4b\\x03\\x04\": \"zip\",\r\n b\"\\x37\\x7a\\xbc\\xaf\\x27\\x1c\": \"7z\",\r\n b\"\\x75\\x73\\x74\\x61\\x72\": \"tar\",\r\n b\"\\x52\\x61\\x72\\x21\\x1a\\x07\\x00\": \"rar\",\r\n }\r\n\r\n max_len = max(len(x) for x in sign_dict)\r\n with open(file_path, \"rb\") as f:\r\n file_start = f.read(max_len)\r\n for magic, filetype in sign_dict.items():\r\n if file_start.startswith(magic):\r\n return filetype\r\n return None",
"def is_gzipped(infile):\n logger = logging.getLogger(__name__)\n\n magic_number = b'\\x1f\\x8b'\n f = open(infile, 'rb')\n with f:\n try:\n assert f.read(2) == magic_number\n except AssertionError as e:\n logger.info(f'{infile} is not gzipped')\n return False\n else:\n logger.debug(f'{infile} is gzipped')\n return True",
"def _is_archive(local_path: str) -> bool:\n archive_mimetypes = [\n \"application/zip\",\n \"application/x-tar\",\n \"application/x-gzip\",\n \"application/x-bzip2\",\n \"application/x-7z-compressed\",\n \"application/x-rar-compressed\",\n \"application/x-xz\",\n \"application/x-lzip\",\n \"application/x-lzma\",\n \"application/x-lzop\",\n \"application/x-bzip\",\n \"application/x-bzip2\",\n \"application/x-compress\",\n \"application/x-compressed\",\n ]\n\n return mimetypes.guess_type(local_path)[0] in archive_mimetypes",
"def _gz(filename):\n \n with open(filename, 'rb') as f:\n return binascii.hexlify(f.read(2)) == b'1f8b'",
"def are_files_gzipped(raw_files):\n files_are_gzipped = None\n for file_name in raw_files:\n if re.search(r\"\\.gz$\", file_name) is not None:\n if files_are_gzipped is False:\n raise Exception(\n \"It seems one file is compressed and the \"\n \"other is \"\n \"not:\\n{}\".format(\"\\n\".join(raw_files))\n )\n files_are_gzipped = True\n else:\n if files_are_gzipped:\n raise Exception(\n \"It seems one file is compressed and the \"\n \"other is \"\n \"not:\\n{}\".format(\"\\n\".join(raw_files))\n )\n files_are_gzipped = False\n return files_are_gzipped",
"def is_gz_file(f):\n with open(f, \"rb\") as fin:\n return binascii.hexlify(fin.read(2)) == b\"1f8b\"",
"def test_compress_file_response(self):\n with open(__file__, \"rb\") as file1:\n\n def get_response(req):\n file_resp = FileResponse(file1)\n file_resp[\"Content-Type\"] = \"text/html; charset=UTF-8\"\n return file_resp\n\n r = GZipMiddleware(get_response)(self.req)\n with open(__file__, \"rb\") as file2:\n self.assertEqual(self.decompress(b\"\".join(r)), file2.read())\n self.assertEqual(r.get(\"Content-Encoding\"), \"gzip\")\n self.assertIsNot(r.file_to_stream, file1)",
"def is_gzipped(response):\n ctype = response.headers.get('Content-Type', b'').lower()\n cenc = response.headers.get('Content-Encoding', b'').lower()\n return (_is_gzipped(ctype) or\n (_is_octetstream(ctype) and cenc in (b'gzip', b'x-gzip')))",
"def maybe_compress(filename, compress_minsize=config.COMPRESS_MINSIZE):\n size = os.path.getsize(filename)\n if size < compress_minsize:\n return open(filename, 'rb'), False\n\n compressed_size, compressed_fobj = compress_file(filename)\n if compressed_size >= size:\n # Compressed file was larger\n log.info(\"%s was larger when compressed; using uncompressed version\", filename)\n return open(filename, 'rb'), False\n\n return compressed_fobj, True",
"def is_jpegxl_recompressed_jpeg_file(filename):\n try:\n with open(filename, 'rb') as h:\n header = h.read(len(JPEGXL_RECOMPRESSED_JPEG_HEADER))\n # Cf. https://arxiv.org/pdf/1908.03565.pdf, section 9.1,\n # on recompressed-JPEG header.\n return header == JPEGXL_RECOMPRESSED_JPEG_HEADER\n except: # pylint:disable=bare-except\n # If anything failed, this means that we cannot establish that the file\n # has the expected header, so we return False.\n return False",
"def chk_for_gz(filenm):\n import os\n from os.path import expanduser\n filenm = expanduser(filenm)\n\n # File exist?\n if os.path.lexists(filenm):\n chk=True\n return filenm, chk\n\n # .gz already\n if filenm.find('.gz') > 0:\n chk=0\n return filenm, chk\n\n # Add .gz\n if os.path.lexists(filenm+'.gz'):\n chk=True\n return filenm+'.gz', chk\n else:\n chk=False\n return None, chk",
"def compress_content(content_type, content):\n \n command = 'java -jar %s --type=%s' % (yuicompressor_path, content_type)\n p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)\n p.stdin.write(content)\n p.stdin.close()\n \n compressed = p.stdout.read()\n p.stdout.close()\n \n err = p.stderr.read()\n p.stderr.close()\n \n if p.wait() != 0:\n if not err:\n err = 'Unable to use YUI Compressor'\n \n \n return err, compressed",
"def test__decompress(filename):\n with open(filename, mode=\"rb\") as file_handle:\n name, content = Submit._decompress(filename, file_handle)\n assert name.endswith(\"EcoliCore.xml\")\n assert len(content.read()) >= 494226",
"def is_gzip(fp):\r\n return open(fp, 'rb').read(2) == '\\x1f\\x8b'",
"def isGzippable(self, css=0, js=0, REQUEST=None):\n # force: force http compression even if the browser doesn't send an accept\n # debug: return compression state (0: no, 1: yes, 2: force)\n # css: set this to 1 inside a css file (for later use)\n # js: set this to 1 inside a js file (for later use)\n\n if REQUEST is None:\n REQUEST = self.REQUEST\n use_gzip = self.getGzip()\n if not self.getEnabled():\n use_gzip = 'never'\n\n force = 0\n if use_gzip == 'never':\n enable_compression = 0\n elif use_gzip == 'always':\n enable_compression = 1\n force = 1\n elif use_gzip == 'accept-encoding':\n # compress everything except css and js\n enable_compression = 1\n elif use_gzip == 'accept-encoding+user-agent':\n # gzip compatibility info courtesy of\n # http://httpd.apache.org/docs/2.2/mod/mod_deflate.html\n user_agent = REQUEST.get('HTTP_USER_AGENT', '')\n if user_agent.startswith('Mozilla/4'):\n # Netscape 4.x can't handle gzipped css and js\n enable_compression = (css==0 and js==0)\n # Netscape 4.0.6-4.0.8 has some gzip-related bugs\n if user_agent[len('Mozilla/4.')] in ('6','7','8'):\n enable_compression = 0\n # Some versions of MSIE pretend to be Netscape 4.x but are OK with gzipping\n if user_agent.find('MSIE'):\n enable_compression = 1\n\n return (enable_compression, force, REQUEST.get('HTTP_ACCEPT_ENCODING', '').find('gzip') != -1)",
"def is_accept_type(file_name):\n bare_name, file_extension = os.path.splitext(file_name)\n for ext in ACCEPTED_FILES:\n if file_extension.lower() == ext:\n return True\n return False",
"def check_compression(ctype, clevel, olevel):\n repository = Repository(archiver.repository_path, exclusive=True)\n with repository:\n manifest = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)\n state = None\n while True:\n ids, state = repository.scan(limit=LIST_SCAN_LIMIT, state=state)\n if not ids:\n break\n for id in ids:\n chunk = repository.get(id, read_data=True)\n meta, data = manifest.repo_objs.parse(id, chunk) # will also decompress according to metadata\n m_olevel = meta.get(\"olevel\", -1)\n m_psize = meta.get(\"psize\", -1)\n print(\n hexlify(id).decode(),\n meta[\"ctype\"],\n meta[\"clevel\"],\n meta[\"csize\"],\n meta[\"size\"],\n m_olevel,\n m_psize,\n )\n # this is not as easy as one thinks due to the DecidingCompressor choosing the smallest of\n # (desired compressed, lz4 compressed, not compressed).\n assert meta[\"ctype\"] in (ctype, LZ4.ID, CNONE.ID)\n assert meta[\"clevel\"] in (clevel, 255) # LZ4 and CNONE has level 255\n if olevel != -1: # we expect obfuscation\n assert \"psize\" in meta\n assert m_olevel == olevel\n else:\n assert \"psize\" not in meta\n assert \"olevel\" not in meta",
"def is_valid(path):\n with open(path, 'rb') as handle:\n size = os.fstat(handle.fileno()).st_size\n try:\n mgz.header.parse_stream(handle)\n mgz.body.meta.parse_stream(handle)\n while handle.tell() < size:\n mgz.body.operation.parse_stream(handle)\n print('valid')\n return True\n except ConstructError:\n print('invalid')\n return False",
"def test_IsPackage_files():\n with tempfile.NamedTemporaryFile() as f:\n assert not dpack._IsPackage(pathlib.Path(f.name))\n with tempfile.NamedTemporaryFile(suffix=\".txt\") as f:\n assert not dpack._IsPackage(pathlib.Path(f.name))\n with tempfile.NamedTemporaryFile(suffix=\".tar.bz2\") as f:\n assert not dpack._IsPackage(pathlib.Path(f.name))\n with tempfile.NamedTemporaryFile(suffix=\".dpack.tar.bz2\") as f:\n assert dpack._IsPackage(pathlib.Path(f.name))",
"def testCompressedSize(self):\n\n uncompressed_file = tempfile.NamedTemporaryFile(delete=False)\n for line in range(200):\n uncompressed_file.write(\n 'Lorem ipsum dolor sit amet, consectetur adipiscing elit. '\n 'Sed eleifend')\n uncompressed_file.close()\n compressed_path = uncompressed_file.name + '.compressed'\n compressor_path = os.path.join(DIR_SOURCE_ROOT, 'third_party',\n 'fuchsia-sdk', 'sdk', 'tools', 'x64',\n 'blobfs-compression')\n subprocess.call([compressor_path, uncompressed_file.name, compressed_path])\n self.assertEqual(binary_sizes.CompressedSize(uncompressed_file.name),\n os.path.getsize(compressed_path))\n os.remove(uncompressed_file.name)\n os.remove(compressed_path)",
"def _is_valid_ct(content_type: str) -> bool:\n content_type = content_type.strip()\n return _is_valid_regex(CT_CONTENT_TYPE_REGEX_PATTERN, content_type)",
"def test_compressed(self):\n try:\n import zlib\n except ImportError:\n self.skipTest('zlib is missing')\n\n ba = amf3.ByteArray()\n\n self.assertFalse(ba.compressed)\n\n z = zlib.compress(b'b' * 100)\n ba = amf3.ByteArray(z)\n\n self.assertTrue(ba.compressed)\n\n z = zlib.compress(b'\\x00' * 100)\n ba = amf3.ByteArray(z)\n\n self.assertTrue(ba.compressed)",
"def _iszip(self, filename):\n fname, ext = os.path.splitext(filename)\n return ext in _file_openers.keys()",
"def test_compress_response(self):\n r = GZipMiddleware(self.get_response)(self.req)\n self.assertEqual(self.decompress(r.content), self.compressible_string)\n self.assertEqual(r.get(\"Content-Encoding\"), \"gzip\")\n self.assertEqual(r.get(\"Content-Length\"), str(len(r.content)))",
"def can_minimize_file(file_path):\n # If this is not a binary file, we should be able to minimize it in some way.\n if not utils.is_binary_file(file_path):\n return True\n\n # Attempt to minimize IPC dumps.\n if file_path.endswith(testcase_manager.IPCDUMP_EXTENSION):\n return supports_ipc_minimization(file_path)\n\n # Other binary file formats are not supported.\n return False",
"def test_extension_to_content_type(self):\n assert ct.extension_to_content_type(\"jpg\") == \"image/jpg\"\n assert ct.extension_to_content_type(\"jpeg\") == \"image/jpg\"\n assert ct.extension_to_content_type(\"png\") == \"image/png\"\n ct.extension_to_content_type(\"css\",) == \"text/css\"\n ct.extension_to_content_type(\"html\") == \"text/html\"\n ct.extension_to_content_type(\"json\") == \"application/json\"\n ct.extension_to_content_type(\"xml\") == \"application/xml\"\n ct.extension_to_content_type(\"zip\") == \"application/zip\""
] |
[
"0.64723974",
"0.64621985",
"0.6352601",
"0.6308476",
"0.6298893",
"0.62387055",
"0.617346",
"0.6168313",
"0.613196",
"0.6075621",
"0.6057141",
"0.6023388",
"0.59639585",
"0.59618855",
"0.59406054",
"0.5932078",
"0.5916286",
"0.5899778",
"0.5892047",
"0.5887669",
"0.5869888",
"0.58216345",
"0.5794927",
"0.57736546",
"0.57636416",
"0.57325137",
"0.57113236",
"0.57090557",
"0.569644",
"0.5648207"
] |
0.8202419
|
0
|
Compress the content string passed. Should be called when gzip is enabled to compress text types. There is no real advantage in using this with images, since most are already nicely compressed by some image processing algorithm.
|
def _compress_string(content):
    zbuf = StringIO()
    zfile = GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)
    zfile.write(content)
    zfile.close()
    return zbuf.getvalue()
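
The snippet above is Python 2 era code (a str written through StringIO and GzipFile, whose imports are not shown). For comparison, a Python 3 sketch of the same idea, not the dataset's original code, using gzip.compress, which expects bytes and handles the buffer internally:

import gzip

def compress_bytes(content: bytes, level: int = 6) -> bytes:
    # Same effect as the StringIO/GzipFile pair above, for bytes input.
    return gzip.compress(content, compresslevel=level)

sample = b'<p>hello world</p>' * 100
print(len(sample), len(compress_bytes(sample)))  # compressed output is far smaller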
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def compress(string):",
"def compress(string):",
"def _compress_content(self, content):\n zbuf = io.BytesIO()\n zfile = gzip.GzipFile(mode=\"wb\", compresslevel=9, fileobj=zbuf)\n\n try:\n zfile.write(content.read())\n finally:\n zfile.close()\n\n content.file = zbuf\n content.seek(0)\n\n return content",
"def compress(content, threshold=512):\n compression_enabled = CONF.logging.http_request_compression\n\n if is_dict(content):\n for key in content:\n content[key] = compress(content[key])\n if is_string(content) and compression_enabled:\n if len(content) > threshold:\n less_data = content[:50]\n compressed_data = base64.b64encode(\n zlib.compress(bytes(content.encode(\"utf-8\"))))\n if not six.PY2:\n compressed_data = str(compressed_data.decode(\"utf-8\"))\n return pprint.pformat(\n \"\\n***Content compressed by Syntribos.***\"\n \"\\nFirst fifty characters of content:\\n\"\n \"***{data}***\"\n \"\\nBase64 encoded compressed content:\\n\"\n \"{compressed}\"\n \"\\n***End of compressed content.***\\n\".format(\n data=less_data, compressed=compressed_data))\n return content",
"def compression(s):",
"def _gzipencode(content):\n import gzip\n out = BytesIO()\n f = gzip.GzipFile(fileobj=out, mode='w', compresslevel=5)\n f.write(content)\n f.close()\n return out.getvalue()",
"def compress_zlib(self, string):\n #encode the input sting\n self.string = string.encode()\n return zlib.compress(self.string)",
"def compressString(s):\n import cStringIO, gzip\n\n # Nasty monkeypatch to avoid gzip changing every time\n class FakeTime:\n def time(self):\n return 1111111111.111\n\n gzip.time = FakeTime()\n\n zbuf = cStringIO.StringIO()\n zfile = gzip.GzipFile(mode='wb', compresslevel=9, fileobj=zbuf)\n zfile.write(s)\n zfile.close()\n return zbuf.getvalue()",
"def encode(self, compress=0):\n raw = bytes(self._encode())\n return gzip.compress(raw, compress) if compress else raw",
"def compress_content(content_type, content):\n \n command = 'java -jar %s --type=%s' % (yuicompressor_path, content_type)\n p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)\n p.stdin.write(content)\n p.stdin.close()\n \n compressed = p.stdout.read()\n p.stdout.close()\n \n err = p.stderr.read()\n p.stderr.close()\n \n if p.wait() != 0:\n if not err:\n err = 'Unable to use YUI Compressor'\n \n \n return err, compressed",
"def compress(self, s):\n data = zlib.compress(s)\n # drop gzip headers and tail\n return data[2:-4]",
"def __handle_compression(self, x):\n if self.__compress:\n return zlib.compress(x)\n return x",
"def gzip_compress(data):\n s = BytesIO()\n g = gzip.GzipFile(fileobj=s, mode='wb')\n g.write(data)\n g.close()\n return s.getvalue()",
"def compression(self) -> str:\n ...",
"def compress_encode(value):\n return base64.b64encode(zlib.compress(value.encode(\"ascii\"))).decode(\"ascii\")",
"def _gzip_str(string_):\n out = BytesIO()\n\n with gzip.GzipFile(fileobj=out, mode='w') as fo:\n fo.write(string_.encode())\n\n bytes_obj = out.getvalue()\n return bytes_obj",
"def compressBuffer(self, buffer):\r\n # http://jython.xhaus.com/http-compression-in-python-and-jython/\r\n zbuf = cStringIO.StringIO()\r\n zfile = gzip.GzipFile(mode='wb', fileobj=zbuf, compresslevel=9)\r\n zfile.write(buffer)\r\n zfile.close()\r\n return zbuf.getvalue()",
"def string_compression(w):\n if len(w) <= 1:\n return w\n\n substrings = []\n prev_char = w[0]\n char_count = 1\n for char in w[1:]:\n if prev_char == char:\n char_count += 1\n else:\n substrings.append('%s%s' % (prev_char, char_count))\n char_count = 1\n prev_char = char\n\n substrings.append('%s%s' % (prev_char, char_count))\n\n compression = ''.join(substrings)\n if len(compression) < len(w):\n return compression\n else:\n return w",
"def compress(bstr):\n from sphobjinv.re import pb_comments, pb_data\n\n # Preconvert any DOS newlines to Unix\n s = bstr.replace(b\"\\r\\n\", b\"\\n\")\n\n # Pull all of the lines\n m_comments = pb_comments.findall(s)\n m_data = pb_data.finditer(s)\n\n # Assemble the binary header comments and data\n # Comments and data blocks must end in newlines\n hb = b\"\\n\".join(m_comments) + b\"\\n\"\n db = b\"\\n\".join(_.group(0) for _ in m_data) + b\"\\n\"\n\n # Compress the data block\n # Compression level nine is to match that specified in\n # sphinx html builder:\n # https://github.com/sphinx-doc/sphinx/blob/1.4.1/sphinx/\n # builders/html.py#L843\n dbc = zlib.compress(db, 9)\n\n # Return the composited bytestring\n return hb + dbc",
"def gzdeflate():\n return zlib.compress(val)",
"def postprocess(self, json_string):\n is_compressing, is_hash, compressed, spaces = False, False, [], 0\n for row in json_string.split(\"\\n\"):\n if is_compressing:\n if (row[:spaces + 5] == \" \" * (spaces + 4) +\n (\"\\\"\" if is_hash else \"{\")):\n compressed.append(row.rstrip())\n elif (len(row) > spaces and row[:spaces] == \" \" * spaces and\n re.match(\"[\\]\\}],?\", row[spaces:].rstrip())):\n compressed.append(row.rstrip())\n is_compressing = False\n else:\n compressed[-1] += \" \" + row.strip()\n else:\n compressed.append(row.rstrip())\n if any(a in row for a in [\"edges\", \"nodes\"]):\n # Fix to handle issues that arise with empty lists\n if \"[]\" in row:\n continue\n spaces = sum(1 for _ in takewhile(str.isspace, row))\n is_compressing, is_hash = True, \"{\" in row\n return \"\\n\".join(compressed)",
"def compress(value):\n pickled = pickle_util.dump(value)\n return zlib.compress(pickled)",
"def compress(self, data):\r\n return self.add_chunk(data)",
"def compress(string):\n\n compressed = []\n\n curr_char = \"\"\n char_count = 0\n\n for char in string:\n if char != curr_char:\n compressed.append(curr_char)\n\n if char_count > 1:\n compressed.append(str(char_count))\n\n curr_char = char\n char_count = 0\n\n char_count += 1 \n\n compressed.append(curr_char)\n if char_count > 1:\n compressed.append(str(char_count))\n\n return \"\".join(compressed)",
"def compress(string):\n \n # Build the dictionary.\n dict_size = 256\n seen = dict((chr(i), i) for i in range(dict_size))\n \n p = \"\"\n output = 0\n for c in string:\n pc = p + c\n if pc in seen:\n p = pc\n else:\n # We have not seen this. Output the stuff.\n output += 1\n seen[pc] = dict_size\n dict_size += 1\n p = c\n \n # Output the code for w.\n return output * 12",
"def test_compress():\n print('Testing compress')\n\n # Cases given to test this problem\n assert_equals('c1o17l1k1a1n1g1a1r1o2',\n hw1.compress('cooooooooooooooooolkangaroo'))\n assert_equals('a3', hw1.compress('aaa'))\n assert_equals('', hw1.compress(''))\n\n # Additional cases to test this problem\n assert_equals('a1p2l1e1', hw1.compress('apple'))\n assert_equals('g1o6d1a1w1g4s3', hw1.compress('goooooodawggggsss'))",
"def compress(string):\n\n past_chars = [string[0]]\n char_counts = [1]\n\n for i in range(1, len(string)):\n if string[i] == past_chars[-1]:\n char_counts[-1] += 1\n else:\n past_chars.append(string[i])\n char_counts.append(1)\n\n compressed_string = \"\"\n\n # list_of_ones = []\n # for i in range(len(string)):\n # list_of_ones.append(1)\n list_of_ones = [1 for x in range(len(string))]\n\n if char_counts == list_of_ones:\n return string\n else:\n for char, count in zip(past_chars, char_counts):\n compressed_string += char + str(count)\n\n\n return compressed_string",
"def compress(self, file):\n\t\t\n\t\ttext = file.read() \n\t\ttext = text.rstrip() #elimina los espacios en blanco del final\n\n\t\t\n\t\tfrequency = self.make_frequency_dict(text)#obtenemos la frencuencia de cada numero en el texto\n\t\tself.make_heap(frequency)\n\t\tself.merge_nodes()\n\t\tself.make_codes()\n\t\tencoded_text = self.get_encoded_text(text)\n\t\tpadded_encoded_text = self.pad_encoded_text(encoded_text)\n\n\t\tb = self.get_byte_array(padded_encoded_text)\n\n\t\treturn b",
"def tar_gz_compress(self, destination):\n\n if destination is not None and isinstance(destination, str):\n with tarfile_open(destination, \"w:gz\") as tar:\n tar.add(self.file)",
"def Compress(input_filename, output_filename):\n _Write(zlib.compress(_Read(input_filename)), output_filename)"
] |
[
"0.727991",
"0.727991",
"0.7271702",
"0.7243655",
"0.72023225",
"0.7170906",
"0.71320665",
"0.6685947",
"0.6557357",
"0.64971095",
"0.6466577",
"0.63694817",
"0.6278639",
"0.624268",
"0.6192079",
"0.61507696",
"0.6118266",
"0.59070766",
"0.57896465",
"0.5700065",
"0.5692797",
"0.56811476",
"0.567238",
"0.5641472",
"0.56245315",
"0.5582722",
"0.55467457",
"0.5535953",
"0.5488455",
"0.5457179"
] |
0.7453164
|
0
|
Build the local metadata file with all SHA information about files. The file location is computed based on the home keyword argument.
|
def _build_local_metadata_file(files, home=''):
    filepaths = [os.path.join(home, f) for f in files]
    shas = [_get_sha_metadata(f) for f in filepaths]
    metadata = dict(zip(files, shas))
    with open(LOCAL_METADATA_FILE, 'w') as f:
        f.write(json.dumps(metadata))
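
The function above depends on _get_sha_metadata and LOCAL_METADATA_FILE, plus os and json imports, none of which appear in this row. A plausible stand-in, offered purely as an assumption about what the missing pieces might look like:

import hashlib
import json
import os

LOCAL_METADATA_FILE = 'local_metadata.json'  # assumed file name

def _get_sha_metadata(filepath):
    # Assumed helper: chunked SHA-256 digest of the file contents.
    sha = hashlib.sha256()
    with open(filepath, 'rb') as fh:
        for chunk in iter(lambda: fh.read(8192), b''):
            sha.update(chunk)
    return sha.hexdigest()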
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def generate_metadata(self):\n self.metadata = {\n 'title': os.path.basename(self.source_file).rsplit('.', 1)[0],\n 'url': self.relative_destination_file,\n 'full_path': os.path.dirname(self.relative_destination_file),\n 'short_path': self.shorten_path(\n os.path.dirname(self.relative_destination_file))\n }",
"def _update_filesystem_metadata(self, metadata):\n directory, fname = os.path.split(self.fname)\n fbase = os.path.splitext(fname)[0]\n \n # Test for presence and size of zip file\n zip_file = fbase + '.zip'\n zip_path = os.path.join(directory, zip_file)\n \n if os.path.isfile(zip_path):\n location = 'on_disk'\n data_file_size = os.path.getsize(zip_path)\n else:\n location = 'on_tape'\n data_file_size = 0\n \n # Test for presence of quick look PNG file\n quicklook_file = fbase + '.png'\n quicklook_path = os.path.join(directory, quicklook_file)\n \n if not os.path.isfile(quicklook_path):\n quicklook_file = ''\n\n # Add to metadata dictionary\n item_map = {'directory': directory, 'metadata_file': fname,\n 'data_file': zip_file, 'location': location, \n 'data_file_size': data_file_size, 'quicklook_file': quicklook_file}\n \n for key, value in item_map.items():\n metadata[key] = value",
"def metadata_path(self):\n return os.path.join(self.path, 'metadata.txt')",
"def metadata_path(self) -> Path:\n return self.download_folder() / f\"{self.manufacturer_ref}-meta.json\"",
"def generate_metadata(install_req):\n # type: (InstallRequirement) -> str\n assert install_req.pep517_backend is not None\n build_env = install_req.build_env\n backend = install_req.pep517_backend\n\n # NOTE: This needs to be refactored to stop using atexit\n metadata_tmpdir = TempDirectory(kind=\"modern-metadata\")\n atexit.register(metadata_tmpdir.cleanup)\n\n metadata_dir = metadata_tmpdir.path\n\n with build_env:\n # Note that Pep517HookCaller implements a fallback for\n # prepare_metadata_for_build_wheel, so we don't have to\n # consider the possibility that this hook doesn't exist.\n runner = runner_with_spinner_message(\"Preparing wheel metadata\")\n with backend.subprocess_runner(runner):\n distinfo_dir = backend.prepare_metadata_for_build_wheel(\n metadata_dir\n )\n\n return os.path.join(metadata_dir, distinfo_dir)",
"def add_file_metadata(self):\n metadata = self.__file.require_group(METADATA)\n self.__write_value(metadata, DATE_CREATED, date.today().strftime(\"%Y-%m-%d\"))\n self.__write_value(metadata, SDK_VERSION, __version__)",
"def generate_metadata_files(self):\n\n data_folder = self.get_data_folder(mode='absolute')\n\n parents = (data_folder / '_').parents\n\n for mfile in self.mdata:\n for regex, level in METADATA_LEVEL_BY_NAME.items():\n if re.compile(regex).match(mfile.name):\n create_file(mfile, parents[(3-level)] / mfile.name,\n mode='copy')",
"def build_data(cmd, rel_new_path, new_md5, founded_path=None):\n data = {'cmd': cmd}\n if cmd == 'copy':\n data['file'] = {'src': founded_path,\n 'dst': rel_new_path,\n 'md5': new_md5,\n }\n else:\n data['file'] = {'filepath': rel_new_path,\n 'md5': new_md5,\n }\n return data",
"def get_metadata(self):\n previous = DirectoryMetadata.load_pickle(self)\n metadata = {}\n\n for dirpath, dirnames, filenames in os.walk(self.prefix_dir):\n for fname in filenames:\n path = os.path.join(dirpath, fname)\n relative_path = path.split(self.base_dir, 1)[1]\n try:\n stats = os.stat(path)\n except OSError:\n log.exception('Error stating a file on disk while building up metadata, skipping file %s' % path)\n continue\n swift_bytes = stats.st_size\n mtime = datetime.utcfromtimestamp(stats.st_mtime)\n if (previous is not None) and (relative_path in previous.metadata) and\\\n (previous.metadata[relative_path].bytes == swift_bytes):\n swift_hash = previous.metadata[relative_path].hash\n else:\n try:\n with open(path, 'rb') as afile:\n md5_hash = hashlib.md5()\n md5_hash.update(afile.read())\n swift_hash = md5_hash.hexdigest()\n except OSError:\n log.exception('Error reading a file to create the md5 while building up metadata, skipping file %s' % path)\n continue\n\n metadata[relative_path] = FileMetadata(relative_path, swift_bytes, mtime, swift_hash)\n\n return metadata",
"def _add_filename_metadata(self, extra_metadata): \n \n # Make sure product_info section exists\n extra_metadata.setdefault('product_info', {})\n \n file_name = os.path.basename(self.fname)\n fn_comps = file_name.split(\"_\")\n \n if self.__class__ == SAFESentinel1:\n component = fn_comps[2]\n if len(component) < 4: \n resolution = 'N/A'\n else:\n resolution = component[-1]\n \n extra_metadata['product_info']['Resolution'] = resolution\n \n # Add file/scan name \n extra_metadata['product_info']['Name'] = os.path.splitext(file_name)[0]\n \n # Add Satellite and Mission from the file path\n comp_1 = fn_comps[0].upper()\n extra_metadata['platform']['Mission'] = \"Sentinel-%s\" % comp_1[1]\n extra_metadata['platform']['Satellite'] = \"Sentinel-%s\" % comp_1[1:]",
"def printFileHash(fulltextData, artMeta):\n crawlerName = fulltextData['crawlerName']\n for ext, page in fulltextData.iteritems():\n if ext in ('crawlerName', 'status'):\n continue\n if ext == 'main.pdf':\n checkIfPdf(page, artMeta)\n sha1 = hashlib.sha1(page['data']).hexdigest()\n row = [crawlerName,\n ext,\n page['url'],\n str(len(page['data'])),\n sha1]\n print('\\t'.join(row))",
"def gen_file_metadata_summary(self, metadata):\n title = sub('[\\W_]+', '', metadata['title'].lower())\n season = str(metadata['season']).zfill(2)\n episode = str(metadata['episode']).zfill(2)\n\n file_metadata_summary = f'{title}.S{season}E{episode}'\n return file_metadata_summary",
"def __build_file_name(self, func, args):\n # Build a unique string to hash\n if self.__log:\n self.__logger.info(f\"Building file name for {func.__name__} with {args}\")\n\n # Hash with the specified algorithm and hexdigest\n # to produce a string\n fname = self.algorithm(\n b\"\".join([func.__name__.encode(\"utf8\"), pickle.dumps(args)])\n ).hexdigest()\n\n pathToFile = os.path.join(self.cacheDir, fname)\n if self.__log:\n self.__logger.info(f\"Built path {pathToFile}\")\n return pathToFile",
"def gen_meta(self, filename):\n nf_meta = {}\n nf_meta['filename'] = filename\n nf_meta['deleted'] = 0\n\n # http://stackoverflow.com/a/5297483\n nf_meta['key'] = hashlib.md5(str(filename).encode('utf-8')).hexdigest()\n self.log.debug(\"Note File Meta Key: %s\", nf_meta['key'])\n\n path = self.config.get_config('cfg_nt_path')\n\n # WARNING THIS IS PLATFORM SPECIFIC\n nf_meta['createdate'] = os.stat(path + \"/\" + filename).st_birthtime\n self.log.debug(\"Note File Meta Created: %s [%s]\", nf_meta['createdate'], time.ctime(nf_meta['createdate']))\n\n nf_meta['modifydate'] = os.stat(path + \"/\" + filename).st_mtime\n self.log.debug(\"Note File Meta Modified: %s [%s]\", nf_meta['modifydate'], time.ctime(nf_meta['modifydate']))\n\n return nf_meta",
"def create_meta(prefix, dist, info_dir, extra_info):\n # read info/index.json first\n with open(join(info_dir, 'index.json')) as fi:\n meta = json.load(fi)\n # add extra info\n meta.update(extra_info)\n # write into <prefix>/conda-meta/<dist>.json\n meta_dir = join(prefix, 'conda-meta')\n if not isdir(meta_dir):\n os.makedirs(meta_dir)\n with open(join(meta_dir, 'history'), 'w') as fo:\n fo.write('')\n with open(join(meta_dir, dist + '.json'), 'w') as fo:\n json.dump(meta, fo, indent=2, sort_keys=True)",
"def metadata_path(self, fmt: str = \"csv\"):\n if self.options.metadata_as_name:\n save_name = self.dataset_name.lower().replace(\" \", \"_\").replace(\"-\", \"_\") + f\".{fmt}\"\n return os.path.join(self.extracted_path, f\"{save_name}\")\n else:\n return os.path.join(self.extracted_path, f\"metadata.{fmt}\")",
"def _store_package_metadata(self):",
"def create_file_meta_data(vk4_container, args):\n log.debug(\"Entering create_file_meta_data()\")\n\n header_list = list()\n header_list.append(args.layer)\n header_list.append('\\n')\n header_list.append('File name')\n header_list.append(args.input)\n header_list.append('Title')\n header_list.append(args.input[:-4])\n header_list.append('Measurement date')\n header_list.append(str(vk4_container.measurement_conditions['month']) + '\\\\' +\n str(vk4_container.measurement_conditions['day']) + '\\\\' +\n str(vk4_container.measurement_conditions['year']))\n header_list.append('Measurement time')\n header_list.append(str(vk4_container.measurement_conditions['hour']) + ':' +\n str(vk4_container.measurement_conditions['minute']) + ':' +\n str(vk4_container.measurement_conditions['second']))\n # User mode?\n header_list.append('Objective lens')\n header_list.append(vk4_container.string_data['lens_name'] + ' ' +\n str(vk4_container.measurement_conditions['lens_magnification'] / 10.0) + 'x')\n header_list.append('Numerical Aperture')\n header_list.append(vk4_container.measurement_conditions['num_aperture'] / 1000.0)\n # Size? Standard?\n # Mode? Surface profile?\n # RPD? OFF?\n header_list.append('Quality')\n header_list.append('Skip 4 lines')\n header_list.append('Pitch (um)')\n header_list.append(vk4_container.measurement_conditions['pitch'] / 1000.0)\n header_list.append('Z measurement distance (um)')\n header_list.append(vk4_container.measurement_conditions['distance'] / 1000.0)\n # Double scan? OFF?\n header_list.append('Brightness 1')\n header_list.append(vk4_container.measurement_conditions['PMT_gain'])\n header_list.append('Brightness 2')\n br_2 = vk4_container.measurement_conditions['PMT_gain_2']\n header_list.append('---') if br_2 == 0 else header_list.append(br_2)\n # Not sure how they got ND filter to 30% in example csv\n header_list.append('ND filter (%)')\n header_list.append(vk4_container.measurement_conditions['ND_filter'] * 30)\n header_list.append('Optical zoom')\n header_list.append(vk4_container.measurement_conditions['optical_zoom'] / 10.0)\n # Average count? 1 time?\n # Filter? OFF?\n # Fine mode? 
ON?\n header_list.append('Line count')\n l_count = vk4_container.measurement_conditions['number_of_lines']\n header_list.append(l_count)\n\n header_list.append('Line position1')\n if l_count == 0:\n header_list.append('---')\n else:\n header_list.append(vk4_container.measurement_conditions['reserved_1'][0])\n\n header_list.append('Line position2')\n if l_count == 0:\n header_list.append('---')\n else:\n header_list.append(vk4_container.measurement_conditions['reserved_1'][1])\n\n header_list.append('Line position3')\n if l_count == 0:\n header_list.append('---')\n else:\n header_list.append(vk4_container.measurement_conditions['reserved_1'][2])\n\n header_list.append('Camera gain (db)')\n header_list.append(vk4_container.measurement_conditions['camera_gain'] * 6)\n header_list.append('Shutter speed')\n header_list.append(vk4_container.measurement_conditions['shutter_speed'])\n header_list.append('White balance mode')\n wb_mode = vk4_container.measurement_conditions['white_balance_mode']\n header_list.append('Auto') if wb_mode == 1 else header_list.append(wb_mode)\n header_list.append('White balance R')\n header_list.append(vk4_container.measurement_conditions['white_balance_red'])\n header_list.append('White balance B')\n header_list.append(vk4_container.measurement_conditions['white_balance_blue'])\n header_list.append('Intensity correction mode')\n header_list.append('Gamma correction')\n header_list.append('Gamma correction value')\n header_list.append(vk4_container.measurement_conditions['gamma'] / 100.0)\n header_list.append('Gamma offset (%)')\n header_list.append(vk4_container.measurement_conditions['gamma_correction_offset'] /\n 65536.0)\n # W/B inversion? OFF?\n # Head type? VK-X110?\n # Correct intensity eccentricity? OFF?\n # Correct field curvature? OFF?\n header_list.append('XY calibration (nm/pixel)')\n header_list.append(vk4_container.measurement_conditions['x_length_per_pixel'] / 1000.0)\n header_list.append('Z calibration (nm/digit)')\n header_list.append(vk4_container.measurement_conditions['z_length_per_digit'] / 1000.0)\n # Saturation?\n # Contrast?\n # Brightness?\n # AI noise elimination? Auto(ON)?\n # Angled surface noise filter? Auto(OFF)?\n header_list.append('Width')\n header_list.append(vk4_container.image_width)\n header_list.append('Height')\n header_list.append(vk4_container.image_height)\n # Skip amount? 1?\n\n out_type = args.type\n if out_type == 'hcsv':\n log.debug(\"Exiting create_file_meta_data() where out_type == %s\" % out_type)\n return np.reshape(header_list, (len(header_list) // 2, 2))\n else:\n # Can use a dict to attach info to an image using PILs Image module\n meta_dict = dict()\n for n in range(0, len(header_list), 2):\n meta_dict[header_list[n]] = header_list[n + 1]\n\n log.debug(\"Exiting create_file_meta_data() where out_type == %s\" % out_type)\n return meta_dict",
"def __init__(self, updater_name, repository_mirrors):\n \n # Do the arguments have the correct format?\n # These checks ensure the arguments have the appropriate\n # number of objects and object types and that all dict\n # keys are properly named.\n # Raise 'tuf.FormatError' if there is a mistmatch.\n tuf.formats.NAME_SCHEMA.check_match(updater_name)\n tuf.formats.MIRRORDICT_SCHEMA.check_match(repository_mirrors)\n \n # Save the validated arguments.\n self.name = updater_name\n self.mirrors = repository_mirrors\n\n # Store the trusted metadata read from disk.\n self.metadata = {}\n \n # Store the currently trusted/verified metadata.\n self.metadata['current'] = {} \n \n # Store the previously trusted/verified metadata.\n self.metadata['previous'] = {}\n\n # Store the file information of all the metadata files. The dict keys are\n # paths, the dict values fileinfo data. This information can help determine\n # whether a metadata file has changed and so needs to be re-downloaded.\n self.fileinfo = {}\n \n # Store the location of the client's metadata directory.\n self.metadata_directory = {}\n \n # Ensure the repository metadata directory has been set.\n if tuf.conf.repository_directory is None:\n message = 'The TUF update client module must specify the directory' \\\n ' containing the local repository files.' \\\n ' \"tuf.conf.repository_directory\" MUST be set.'\n raise tuf.RepositoryError(message)\n\n # Set the path for the current set of metadata files. \n repository_directory = tuf.conf.repository_directory\n current_path = os.path.join(repository_directory, 'metadata', 'current')\n \n # Ensure the current path is valid/exists before saving it.\n if not os.path.exists(current_path):\n message = 'Missing '+repr(current_path)+'. This path must exist and, ' \\\n 'at a minimum, contain the root metadata file.' \n raise tuf.RepositoryError(message)\n self.metadata_directory['current'] = current_path\n \n # Set the path for the previous set of metadata files. \n previous_path = os.path.join(repository_directory, 'metadata', 'previous') \n \n # Ensure the previous path is valid/exists.\n if not os.path.exists(previous_path):\n message = 'Missing '+repr(previous_path)+'. This path must exist.'\n raise tuf.RepositoryError(message)\n self.metadata_directory['previous'] = previous_path\n \n # Load current and previous metadata.\n for metadata_set in ['current', 'previous']:\n for metadata_role in ['root', 'targets', 'release', 'timestamp']:\n self._load_metadata_from_file(metadata_set, metadata_role)\n \n # Raise an exception if the repository is missing the required 'root'\n # metadata.\n if 'root' not in self.metadata['current']:\n message = 'No root of trust! Could not find the \"root.txt\" file.'\n raise tuf.RepositoryError(message)",
"def create_readme(case_dict):\n # ---------------------------------------------------------------------\n logger.debug(\"create_readme\")\n os.chdir(case_dict[\"archive_temp_dir\"])\n\n fname = open(\"README.archive\", \"w\")\n fname.write(\"Archived metadata is available for this case at URL:\\n\")\n fname.write(case_dict[\"base_expdb_url\"])\n fname.close()",
"def create_version_file(version='unknown', gitmeta=''):\n\tfname = join(dirname(abspath(__file__)), 'MHLogin', '_version.py')\n\tf = open(fname, 'wb')\n\tf.write(VERSION_PY % {'version': version, 'gitmeta': gitmeta, })\n\tf.close()",
"def write_fileindex_md(hashalgo_md, fileindex, repo_name, hashalgo, format=None, include_local_filename=False):\n if format is None:\n format = \"list\"\n with open(hashalgo_md, \"wb\") as f:\n if format==\"table\":\n header = []\n header.append(\"| FileName | FileDate | \" + hashalgo + \" |\\n\")\n header.append(\"|----------|----------|-------------|\\n\")\n if include_local_filename:\n header[0] = \"| LocalFileName \" + header[0]\n header[1] = \"|---------------\" + header[1]\n for header_line in header:\n f.write(bytes(header_line, \"UTF-8\"))\n for fileindex_item in fileindex:\n checksum = fileindex_item[COLUMN_CHECKSUM]\n filename = fileindex_item[COLUMN_FILENAME]\n filedate = fileindex_item[COLUMN_FILEDATE] if len(fileindex_item) > COLUMN_FILEDATE else \"\"\n local_filename = fileindex_item[COLUMN_LOCAL_FILENAME] if len(fileindex_item) > COLUMN_LOCAL_FILENAME else \"\"\n if format==\"table\":\n row = \"\"\n if include_local_filename:\n row += \"| \" + local_filename + \" \"\n row += \"| [\" + filename + \"](https://github.com/\" + repo_name + \"/releases/download/\" + hashalgo + \"/\" + checksum + \") \"\n row += \"| \" + filedate + \" \"\n row += \"| \" + checksum + \" \"\n f.write(bytes(row + \"|\\n\", \"UTF-8\",))\n else:\n f.write(bytes(\"- [\" + filename + \"](https://github.com/\" + repo_name + \"/releases/download/\" + hashalgo + \"/\" + checksum + \")\\n\", \"UTF-8\",))\n if include_local_filename:\n f.write(bytes(\" - LocalFileName: \" + local_filename + \"\\n\", \"UTF-8\",))\n if filedate:\n f.write(bytes(\" - FileDate: \" + filedate + \"\\n\", \"UTF-8\",))\n f.write(bytes(\" - \" + hashalgo +\": \" + checksum + \"\\n\", \"UTF-8\",))",
"def _gen_metadata_for_list(self, filename, **extra_data):\n file_size = os.stat(filename).st_size\n with open(filename, 'rb') as f:\n md5sum = compute_md5_hash(f)\n core_metadata = {\n 'filename': os.path.abspath(filename),\n 'md5sum': md5sum,\n 'file_size_bytes': file_size\n }\n return {**core_metadata, **extra_data}",
"def build_metadata_from_file(tmp_crawler_folder, abs_data_path, commitTime):\n m = MetaData()\n\n post_path = abs_data_path[len(tmp_crawler_folder):]\n if post_path[0] == '/':\n post_path = post_path[1:]\n\n m['mimeType'] = mime.get_mime(abs_data_path)\n m['url'] = MetaData.get_url_from_path(post_path)\n m['domain'] = MetaData.get_domain_from_path(post_path)\n m['path'] = MetaData.get_content_path_from_tmp(abs_data_path, m['domain'])\n m['tmpPath'] = abs_data_path\n m['createTime'] = utl.get_ctime(abs_data_path)\n m['commitTime'] = commitTime\n m['title'] = extractor.get_title(abs_data_path, m['mimeType'])\n\n if not m['url'] in m['path']:\n print('WARNING, url not in path!')\n return m",
"def run(self, info):\n\n # Write the metadata to the file's xattrs\n self._downloader.to_screen('[metadata] Writing metadata to file\\'s xattrs')\n\n filename = info['filepath']\n\n try:\n xattr_mapping = {\n 'user.xdg.referrer.url': 'webpage_url',\n # 'user.xdg.comment': 'description',\n 'user.dublincore.title': 'title',\n 'user.dublincore.date': 'upload_date',\n 'user.dublincore.description': 'description',\n 'user.dublincore.contributor': 'uploader',\n 'user.dublincore.format': 'format',\n }\n\n num_written = 0\n for xattrname, infoname in xattr_mapping.items():\n\n value = info.get(infoname)\n\n if value:\n if infoname == 'upload_date':\n value = hyphenate_date(value)\n\n byte_value = value.encode('utf-8')\n write_xattr(filename, xattrname, byte_value)\n num_written += 1\n\n return [], info\n\n except XAttrUnavailableError as e:\n self._downloader.report_error(str(e))\n return [], info\n\n except XAttrMetadataError as e:\n if e.reason == 'NO_SPACE':\n self._downloader.report_warning(\n 'There\\'s no disk space left, disk quota exceeded or filesystem xattr limit exceeded. '\n + (('Some ' if num_written else '') + 'extended attributes are not written.').capitalize())\n elif e.reason == 'VALUE_TOO_LONG':\n self._downloader.report_warning(\n 'Unable to write extended attributes due to too long values.')\n else:\n msg = 'This filesystem doesn\\'t support extended attributes. '\n if compat_os_name == 'nt':\n msg += 'You need to use NTFS.'\n else:\n msg += '(You may have to enable them in your /etc/fstab)'\n self._downloader.report_error(msg)\n return [], info",
"def build_content(self) -> None:\n logger.info(__('writing content.opf file...'))\n metadata = self.content_metadata()\n\n # files\n self.files: list[str] = []\n self.ignored_files = ['.buildinfo', 'mimetype', 'content.opf',\n 'toc.ncx', 'META-INF/container.xml',\n 'Thumbs.db', 'ehthumbs.db', '.DS_Store',\n 'nav.xhtml', self.config.epub_basename + '.epub'] + \\\n self.config.epub_exclude_files\n if not self.use_index:\n self.ignored_files.append('genindex' + self.out_suffix)\n for root, dirs, files in os.walk(self.outdir):\n dirs.sort()\n for fn in sorted(files):\n filename = relpath(path.join(root, fn), self.outdir)\n if filename in self.ignored_files:\n continue\n ext = path.splitext(filename)[-1]\n if ext not in self.media_types:\n # we always have JS and potentially OpenSearch files, don't\n # always warn about them\n if ext not in ('.js', '.xml'):\n logger.warning(__('unknown mimetype for %s, ignoring'), filename,\n type='epub', subtype='unknown_project_files')\n continue\n filename = filename.replace(os.sep, '/')\n item = ManifestItem(html.escape(quote(filename)),\n html.escape(self.make_id(filename)),\n html.escape(self.media_types[ext]))\n metadata['manifest_items'].append(item)\n self.files.append(filename)\n\n # spine\n spinefiles = set()\n for refnode in self.refnodes:\n if '#' in refnode['refuri']:\n continue\n if refnode['refuri'] in self.ignored_files:\n continue\n spine = Spine(html.escape(self.make_id(refnode['refuri'])), True)\n metadata['spines'].append(spine)\n spinefiles.add(refnode['refuri'])\n for info in self.domain_indices:\n spine = Spine(html.escape(self.make_id(info[0] + self.out_suffix)), True)\n metadata['spines'].append(spine)\n spinefiles.add(info[0] + self.out_suffix)\n if self.use_index:\n spine = Spine(html.escape(self.make_id('genindex' + self.out_suffix)), True)\n metadata['spines'].append(spine)\n spinefiles.add('genindex' + self.out_suffix)\n # add auto generated files\n for name in self.files:\n if name not in spinefiles and name.endswith(self.out_suffix):\n spine = Spine(html.escape(self.make_id(name)), False)\n metadata['spines'].append(spine)\n\n # add the optional cover\n html_tmpl = None\n if self.config.epub_cover:\n image, html_tmpl = self.config.epub_cover\n image = image.replace(os.sep, '/')\n metadata['cover'] = html.escape(self.make_id(image))\n if html_tmpl:\n spine = Spine(html.escape(self.make_id(self.coverpage_name)), True)\n metadata['spines'].insert(0, spine)\n if self.coverpage_name not in self.files:\n ext = path.splitext(self.coverpage_name)[-1]\n self.files.append(self.coverpage_name)\n item = ManifestItem(html.escape(self.coverpage_name),\n html.escape(self.make_id(self.coverpage_name)),\n html.escape(self.media_types[ext]))\n metadata['manifest_items'].append(item)\n ctx = {'image': html.escape(image), 'title': self.config.project}\n self.handle_page(\n path.splitext(self.coverpage_name)[0], ctx, html_tmpl)\n spinefiles.add(self.coverpage_name)\n\n auto_add_cover = True\n auto_add_toc = True\n if self.config.epub_guide:\n for type, uri, title in self.config.epub_guide:\n file = uri.split('#')[0]\n if file not in self.files:\n self.files.append(file)\n if type == 'cover':\n auto_add_cover = False\n if type == 'toc':\n auto_add_toc = False\n metadata['guides'].append(Guide(html.escape(type),\n html.escape(title),\n html.escape(uri)))\n if auto_add_cover and html_tmpl:\n metadata['guides'].append(Guide('cover',\n self.guide_titles['cover'],\n html.escape(self.coverpage_name)))\n if auto_add_toc and self.refnodes:\n 
metadata['guides'].append(Guide('toc',\n self.guide_titles['toc'],\n html.escape(self.refnodes[0]['refuri'])))\n\n # write the project file\n copy_asset_file(path.join(self.template_dir, 'content.opf_t'), self.outdir, metadata)",
"def generate_hash(self):\r\n\r\n hash_list = []\r\n for root, dirs, files in os.walk(self.options['source']):\r\n for f in sorted([f for f in files if not f.startswith('.')]):\r\n hash_list.append(os.path.join(root, f))\r\n hash_list.append(str(os.path.getmtime(os.path.join(root, f))))\r\n hash_list = ''.join(hash_list)\r\n\r\n if sys.version < '3':\r\n return hashlib.sha1(hash_list).hexdigest()\r\n return hashlib.sha1(hash_list.encode('utf-8')).hexdigest()",
"def create_metadata(scene: \"Scenemaker\") -> None:\r\n create_datadir()\r\n\r\n with open(dirpath / cng.GENERATED_DATA_DIR / cng.METADATA_FILE, \"w+\") as f:\r\n f.write(str(scene.num2name))",
"def get_and_update_metadata():\n if not os.path.exists('.git') and os.path.exists(METADATA_FILENAME):\n with open(METADATA_FILENAME) as fh:\n metadata = json.load(fh)\n else:\n git = Git()\n revision = os.environ.get('TRAVIS_BUILD_NUMBER', git.revision)\n split_version = git.version.split('.')\n split_version[-1] = revision\n version = '.'.join(split_version)\n metadata = {\n 'version': version,\n 'git_hash': git.hash,\n 'git_origin': git.origin,\n 'git_branch': git.branch,\n 'git_version': git.version\n }\n with open(METADATA_FILENAME, 'w') as fh:\n json.dump(metadata, fh)\n return metadata",
"def bundle_metadata(self, metadata):\n\n metadata_file = None\n try:\n metadata_file = tempfile.NamedTemporaryFile(delete=False)\n except IOError:\n task_error('Cannot create metadata file in working directory')\n\n metadata_file.write(metadata)\n fname = metadata_file.name\n metadata_file.close()\n\n metadata_file = open(fname, mode='rb')\n\n # metadata_file.seek(0)\n\n if self.empty_tar:\n tarball = tarfile.TarFile(name=self.bundle_path, mode='w')\n self.empty_tar = False\n else:\n tarball = tarfile.TarFile(name=self.bundle_path, mode='a')\n\n try:\n tar_info = tarfile.TarInfo('metadata.txt')\n tar_info.size = len(metadata)\n tar_info.mtime = time.time()\n tarball.addfile(tar_info, metadata_file)\n metadata_file.close()\n tarball.close()\n os.remove(fname)\n except Exception, ex:\n print ex\n traceback.print_exc(file=sys.stdout)\n raise ex"
] |
[
"0.61526465",
"0.6049966",
"0.5853711",
"0.5817837",
"0.5641126",
"0.5638058",
"0.5613152",
"0.5524916",
"0.5511576",
"0.5498614",
"0.54815716",
"0.54704076",
"0.5461879",
"0.5444722",
"0.54036105",
"0.53994805",
"0.53729725",
"0.5360554",
"0.53537804",
"0.53431183",
"0.5313954",
"0.5287763",
"0.528125",
"0.52692026",
"0.5260768",
"0.52528274",
"0.524233",
"0.5220996",
"0.5190659",
"0.519015"
] |
0.7596386
|
0
|
Uploads a file to an S3 bucket. If gzip=True, compress and upload the gzipped version of the file instead of the original one. If gzip=True and the file cannot be compressed, quit the upload process (don't upload at all). So you should always pass the correct gzip info into this function in order to get an upload.
|
def upload_file(conn, filename_local, filename_s3, gzip=False):
filename_s3 = filename_s3.lstrip('./')
file_descriptor = open(filename_local, 'rb')
content = file_descriptor.read()
content_type = _get_content_type(file_descriptor)
headers = _get_headers(content_type)
#should compress if the file is compressable and gzip is enabled
can_be_gzipped = _file_can_be_compressed(filename_local)
if gzip and can_be_gzipped:
content = _compress_string(content)
headers['Content-Length'] = str(len(content))
headers['Content-Encoding'] = 'gzip'
extension = mimetypes.guess_extension(content_type)
#we should not overwrite the original file in the server.
#We change extensions: style.css --> style.gz.css, for instance
filename_s3 = filename_s3.rstrip(extension) + '.gz' + extension
#if gzip is enabled and it is not compressable, don't upload nothing at all
elif gzip and not can_be_gzipped:
return
#upload
print 'Uploading %s to %s' % (filename_local, filename_s3)
_put(conn, filename_s3, content, headers=headers)
file_descriptor.close()
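
A minimal, self-contained sketch of the gzip branch above, using only the standard library. The connection object, _put, and the header helpers from the original are assumed and left out; the suffix rewrite (style.css -> style.gz.css) is done with an explicit suffix check instead of rstrip, and prepare_for_upload is a made-up name for illustration only.

import gzip
import io
import mimetypes

def prepare_for_upload(filename_local, content, use_gzip=True):
    # Guess the content type from the file name and build minimal headers.
    content_type = mimetypes.guess_type(filename_local)[0] or 'application/octet-stream'
    headers = {'Content-Type': content_type}
    filename_s3 = filename_local.lstrip('./')
    if use_gzip:
        # Compress the body in memory and adjust the headers accordingly.
        buf = io.BytesIO()
        with gzip.GzipFile(fileobj=buf, mode='wb') as gz:
            gz.write(content)
        content = buf.getvalue()
        headers['Content-Encoding'] = 'gzip'
        headers['Content-Length'] = str(len(content))
        # Rename style.css -> style.gz.css so the original object is not overwritten.
        extension = mimetypes.guess_extension(content_type) or ''
        if extension and filename_s3.endswith(extension):
            filename_s3 = filename_s3[:-len(extension)] + '.gz' + extension
    return filename_s3, content, headers

name, body, headers = prepare_for_upload('style.css', b'body { color: red; }')
print(name, headers['Content-Encoding'])  # style.gz.css gzip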
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def upload_file(file_obj, filename, session, samples_resource, log_to, metadata, tags):\n upload_args = {\n 'filename': filename,\n 'size': 1, # because we don't have the actually uploaded size yet b/c we're gziping it\n 'upload_type': 'standard' # This is multipart form data\n }\n if metadata:\n upload_args['metadata'] = metadata\n\n if tags:\n upload_args['tags'] = tags\n\n try:\n upload_info = samples_resource.init_upload(upload_args)\n\n except requests.exceptions.HTTPError as e:\n error_object = e[0]\n process_api_error(error_object)\n\n upload_url = upload_info['upload_url']\n\n # Need a OrderedDict to preserve order for S3 (although this doesn't actually matter?)\n multipart_fields = OrderedDict()\n for k, v in upload_info['additional_fields'].items():\n multipart_fields[str(k)] = str(v)\n\n # First validate the file if a FASTXTranslator\n if isinstance(file_obj, FASTXTranslator):\n file_obj.validate()\n\n # If it isn't being modified and is already compressed, don't bother re-parsing it\n if not file_obj.modified and file_obj.is_gzipped:\n file_obj = FASTXReader(file_obj.reads.file_obj.fileobj,\n progress_callback=file_obj.progress_callback)\n\n multipart_fields['file'] = (filename, file_obj, 'application/x-gzip')\n encoder = MultipartEncoder(multipart_fields)\n content_type = encoder.content_type\n\n # try to upload the file, retrying as necessary\n max_retries = 3\n n_retries = 0\n while n_retries < max_retries:\n try:\n upload_request = session.post(upload_url, data=encoder,\n headers={'Content-Type': content_type}, auth={})\n if upload_request.status_code not in [200, 201]:\n msg = 'Upload failed. Please contact [email protected] for assistance.'\n if upload_request.status_code >= 400 and upload_request.status_code < 500:\n try:\n msg = '{}. Please ensure your file is valid and then try again.'.format(\n upload_request.json()['message']\n )\n except Exception:\n pass\n raise UploadException(msg)\n\n file_obj.close()\n break\n except requests.exceptions.ConnectionError as e:\n # For proxy, try special route to check the errors\n # in case Python is just dropping the Connection due to validation issues\n if multipart_fields.get('sample_id'):\n error_url = '/'.join(upload_url.split('/')[:-1]) + '/errors'\n try:\n e_resp = session.post(error_url, json={'sample_id': multipart_fields.get('sample_id')})\n if e_resp.status_code == 200:\n msg = '{}. Please ensure your file is valid and then try again.'.format(\n e_resp.json()['message']\n )\n raise UploadException(msg)\n except requests.exceptions.RequestException:\n pass\n\n n_retries += 1\n # reset the file_obj back to the start; we may need to rebuild the encoder too?\n file_obj.seek(0)\n if n_retries == max_retries:\n raise UploadException(\n \"The command line client is experiencing connectivity issues and \"\n \"cannot complete the upload of %s at this time. Please try again \"\n \"later. If the problem persists, contact us at [email protected] \"\n \"for assistance.\" % filename\n )\n\n # Finally, issue a callback\n try:\n if not multipart_fields.get('callback_url'):\n samples_resource.confirm_upload({\n 'sample_id': upload_info['sample_id'],\n 'upload_type': 'standard'\n })\n except requests.exceptions.HTTPError:\n raise UploadException('Failed to upload: %s' % filename)\n\n if log_to is not None:\n log_to.write('\\rUploading: {} finished as sample {}.\\n'.format(\n filename, upload_info['sample_id']\n ))\n log_to.flush()\n return upload_info['sample_id']",
"def _upload_file(file_name, bucket, object_name):\n\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = file_name\n\n # Upload the file\n try:\n s3.upload_file(file_name, bucket, object_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True",
"def upload_file(self, file_name, bucket, object_name=None):\n\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = file_name\n\n # Upload the file\n s3_client = boto3.client('s3', aws_access_key_id=self.aws_access_key_id, aws_secret_access_key=self.aws_secret_access_key)\n try:\n s3_client.upload_file(file_name, bucket, object_name)\n except ClientError as e:\n logging.exception(e)\n return False\n logging.info(\"Upload to S3 bucket complete!\")\n\n return True",
"def upload_file_s3(file_name, bucket):\n\n # If S3 object_name was not specified, use file_name \n try:\n response = s3_client.upload_file(file_name,\n bucket, \n file_name.replace('../',''))\n print(\"Uploaded \" + file_name)\n except ClientError as e:\n print(\"Failed to upload \" + file_name)\n logging.error(e)\n return False\n return True",
"def upload_to_s3(file_name, bucket, key): \n s3 = boto3.resource('s3') \n try:\n s3.meta.client.upload_file(file_name, bucket, key)\n print(\"s3 upload success -- uploaded \" + file_name + \" to the bucket: \" + bucket)\n except ClientError as e:\n logging.error(e)\n return False\n print(\"s3 upload error occurs\", e)\n return True",
"def upload_file(file_name, bucket):\r\n object_name = file_name\r\n s3_client = boto3.client('s3')\r\n response = s3_client.upload_file(file_name, bucket, object_name)\r\n\r\n return response",
"def upload_file(file_name, bucket, object_name=None):\n\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = file_name\n\n # Upload the file\n try:\n response = s3_client.upload_file(file_name, bucket, object_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True",
"def upload_file(file_name, bucket, object_name=None):\n\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = file_name\n\n # Upload the file\n s3_client = boto3.client('s3', aws_access_key_id='', aws_secret_access_key='')\n try:\n response = s3_client.upload_file(file_name, bucket, object_name, ExtraArgs={'ACL':'public-read'})\n except ClientError as e:\n logging.error(e)\n return False\n return True",
"def save_to_s3(bucket, path, data, compress=False):\n bucket = get_bucket(bucket)\n\n key = Key(bucket)\n key.key = path\n logger.debug(\"Uploading to %s\", key.key)\n\n if compress:\n mock_file = BytesIO()\n gzip_obj = gzip.GzipFile(filename='gzipped_file', mode='wb', fileobj=mock_file)\n if isinstance(data, str):\n data = data.encode('utf-8')\n gzip_obj.write(data)\n gzip_obj.close()\n data = mock_file.getvalue()\n\n key.set_contents_from_string(data)",
"def upload_file(file_name, object_name=None, bucket = BUCKET_NAME):\n\n # Upload the file\n s3_client = boto3.client('s3')\n try:\n response = s3_client.upload_file(file_name, bucket, object_name) if type(file_name) == str else s3_client.upload_fileobj(file_name, BUCKET_NAME, object_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True",
"def upload_file(file_name, bucket, object_name=None):\n\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = file_name\n\n # Upload the file\n s3_client = boto3.client('s3')\n try:\n response = s3_client.upload_file(file_name, bucket, object_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True",
"def upload_file(file_name, bucket, object_name=None):\n\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = file_name\n\n # Upload the file\n s3_client = boto3.client('s3')\n try:\n response = s3_client.upload_file(file_name, bucket, object_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True",
"def upload_file(file_name, bucket, object_name=None):\n\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = file_name\n\n # Upload the file\n s3_client = boto3.client(\"s3\")\n try:\n response = s3_client.upload_file(file_name, bucket, object_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True",
"def _upload_s3(self, filename, bucket, objectKey):\n return s3_client.upload_file(filename, bucket, objectKey)",
"def upload(self, bucket, obj, s3_client=None):\n\n s3_client = s3_client or self.s3_client\n transfer_config = boto3.s3.transfer.TransferConfig(multipart_threshold=1024, use_threads=True, max_concurrency=10)\n s3_transfer = boto3.s3.transfer.S3Transfer(client=s3_client, config=transfer_config)\n\n try:\n logging.debug(\"Uploading {} to {}\".format(obj, bucket))\n s3_transfer.upload_file(obj, bucket, helpers.strip_path(obj)[1])\n\n return True\n except botocore.exceptions.EndpointConnectionError:\n logging.error(\"Couldn't connect to an S3 endpoint. If you're using an S3 compatible provider other than AWS, remember to set --s3-endpoint-url\")\n return False\n except Exception as e:\n logging.error(\"Error uploading: {}\".format(e))\n return False",
"def upload_file(file_name, bucket, object_name=None):\n\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = os.path.basename(file_name)\n\n # Upload the file\n s3_client = boto3.client('s3')\n try:\n response = s3_client.upload_file(file_name, bucket, object_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True",
"def upload(\n bucket: str, key: str, filename: str, session: Optional[boto3.Session] = None\n) -> None:\n s3_client = _get_client(session)\n LOGGER.info(\"uploading %s to s3://%s/%s...\", filename, bucket, key)\n s3_client.upload_file(Filename=filename, Bucket=bucket, Key=key)",
"def upload_to_s3(bucket, file_path, prefix, timestamp):\n upload_name = f'{prefix}_{timestamp or \"\"}{basename(file_path)}'\n\n try:\n bucket.upload_file(file_path, upload_name)\n syslog.syslog(syslog.LOG_INFO,\n f'Uploaded {file_path} to S3 Bucket - {bucket.name}')\n return True\n except S3UploadFailedError as s3ex:\n syslog.syslog(\n syslog.LOG_ERR, f'Failed to upload {file_path} to S3 Bucket - {bucket_name} - {s3ex}')\n return False\n finally:\n rm(file_path)",
"def upload_to_s3(site, bucket, directory=None, files=None, prefix=None):\n if bucket is None:\n print red('Error: Bucket must be specified.')\n return\n if directory is None and files is None:\n print red('Error: Directory and/or files must be specified.')\n return\n # Setup boto\n import boto\n from boto.s3.bucket import Bucket\n from boto.s3.key import Key\n import mimetypes\n import fnmatch\n\n setup_aws_access_key(site)\n\n # Connect to S3\n c = boto.connect_s3()\n b = Bucket(c, bucket)\n\n # Fix the prefix\n # prefix itself shouldn't have a / prefix itself but should end with /\n if prefix:\n prefix = prefix.lstrip('/')\n if prefix and not prefix.endswith('/'):\n prefix = prefix + '/'\n\n def __upload(key, filename):\n k = Key(b)\n k.key = key\n headers = {}\n content_type = mimetypes.guess_type(filename)[0]\n if site.has_key('webapp') and site['webapp'].get('cache_control'):\n for pattern in site['webapp']['cache_control']:\n if fnmatch.fnmatch(filename, pattern):\n headers['Cache-Control'] = site['webapp']['cache_control'][pattern]\n break\n if site.has_key('webapp') and site['webapp'].get('gzip_types') and content_type in site['webapp']['gzip_types']:\n from gzip import GzipFile\n from StringIO import StringIO\n # Need to specify content_type when uploading from a string!\n headers['Content-Type'] = content_type\n headers['Content-Encoding'] = 'gzip'\n s = StringIO()\n g = GzipFile(fileobj=s, mode='wb')\n with open(filename, 'rb') as f:\n g.write(f.read())\n g.close()\n k.set_contents_from_string(s.getvalue(), headers)\n else:\n k.set_contents_from_filename(filename, headers)\n\n if files:\n # Upload individual files\n if directory:\n keys = [filename.lstrip('/') for filename in files]\n files = [os.path.join(directory, filename) for filename in files]\n else:\n keys = [os.path.split(filename)[1] for filename in files]\n for i, filename in enumerate(files):\n print 'Uploading %s' % keys[i]\n if prefix:\n key = prefix + keys[i]\n else:\n key = keys[i]\n __upload(key, filename)\n elif directory:\n # Upload an entire directory\n def __upload_dir(arg, dirname, names):\n # arg is the starting directory\n for name in names:\n filename = os.path.join(dirname, name)\n if not os.path.isdir(filename) and not os.path.islink(filename) and not name.startswith('.'):\n key = filename[len(arg):]\n if key.startswith('/'):\n key = key[1:]\n if prefix:\n key = prefix + key\n print 'Uploading %s' % key\n __upload(key, filename)\n os.path.walk(directory, __upload_dir, directory)",
"def upload_s3_file(key, bucket, filename):\n s3_client = boto3.client('s3')\n s3_client.upload_file(filename, bucket, key)\n return True",
"def upload_file(file_name, bucket_name, object_name=None):\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = os.path.basename(file_name)\n\n try:\n # Upload the file\n response = s3.upload_file(file_name, bucket_name, object_name)\n # Get list of files in bucket to confirm\n describe_objects(bucket_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True",
"def upload(filename, bucket):\n print(\"Uploading {} to S3\".format(filename.lower().replace('_', '-')))\n url = \"https://s3.ca-central-1.amazonaws.com/{}/{}\".format(bucket,\n filename.lower().replace('_', '-'))\n with open('{}/{}'.format(WORK_DIR, filename), 'rb') as data:\n requests.put(url, data=data)",
"def upload_file(s3_client, file_name, object_name=None):\n\n # read bucket name from cfg file\n bucket = config.get('S3', 'LANDING_ZONE')\n\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = file_name.split('\\\\')[-1]\n\n # Upload the file\n try:\n response = s3_client.upload_file(file_name, bucket, object_name, Callback=ProgressPercentage(file_name))\n# logger.debug(f\"Got response from s3 client for uploading file: {response}\")\n except Exception as e:\n logger.error(f\"Error occurred while upload {file_name} : {e}\")\n return False\n return True",
"def upload_file(file_name: str, bucket: str, object_name: str =None) -> None:\n\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = file_name\n\n # Upload the file\n s3_client = boto3.client(\"s3\")\n try:\n s3_client.upload_file(file_name, bucket, object_name)\n except ClientError as e:\n logging.error(e)",
"def upload_file_to_bucket(s3_client, file_obj, bucket, folder, object_name=None):\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = file_obj\n\n # Upload the file\n try:\n response = s3_client.upload_fileobj(file_obj, bucket, f\"{folder}/{object_name}\")\n print(response)\n except ClientError:\n return False\n return True",
"def upload_to_s3(file_path, config):\n logging.info(\"Uploading file to S3 bucket: %s\", config['s3_bucket_name'])\n s3 = boto3.resource('s3')\n s3_filename = config['s3_bucket_path'] + config['rendered_filename']\n s3.Bucket(config['s3_bucket_name']).upload_file(\n file_path, s3_filename, ExtraArgs={\n 'ContentType': 'text/html', 'ACL': 'public-read'})",
"def upload_file(file_name, bucket, object_name='patients.log'):\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = file_name\n # Upload the file\n s3_client = boto3.client('s3')\n try:\n response = s3_client.upload_file(file_name, bucket, object_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True",
"def upload_object(self, file_path, s3_path):\n logging.info(\"Uploading file to \\\"{}\\\" to S3\".format(s3_path))\n bucket_name, key = S3Util.get_bucket_and_key(s3_path)\n self.s3_resource.Bucket(bucket_name).upload_file(file_path, key)",
"def upload(self, file_path, bucket_name, file_name):\n\n self.client.upload_file(file_path, bucket_name, file_name)",
"def upload(iid, file_obj, content_type):\n if AWS_CLIENT_CONFIG and BUCKET_NAME:\n try:\n s3 = boto3.resource('s3', **AWS_CLIENT_CONFIG)\n s3.Bucket(BUCKET_NAME).put_object(Key=iid,\n Body=file_obj,\n ContentType=content_type)\n return StorageType.S3\n except botocore.exceptions.ClientError as e:\n logger.error(e)\n else:\n # store locally in temp dir (tests, local development)\n store_temp_file(iid, file_obj)\n return StorageType.TMP\n return None"
] |
[
"0.695896",
"0.68229353",
"0.68038195",
"0.6781243",
"0.67778784",
"0.6743604",
"0.6743087",
"0.67419064",
"0.6726084",
"0.67161316",
"0.6695823",
"0.6695823",
"0.66937494",
"0.66827035",
"0.667724",
"0.6662698",
"0.66604984",
"0.6652789",
"0.66431695",
"0.66219",
"0.6605921",
"0.65906703",
"0.65893906",
"0.65613395",
"0.6542626",
"0.64855653",
"0.6418108",
"0.63972515",
"0.63959455",
"0.6392514"
] |
0.78993046
|
0
|
Returns a recursive list of all files inside folder. Each list element is a string with the file path relative to folder. If any file is found with the same name as LOCAL_METADATA_FILE, it is not appended to the list.
|
def _get_file_list(folder):
tree = [x for x in os.walk(folder)]
files = [os.path.join(t[0], y) for t in tree for y in t[2]]
return [os.path.relpath(x, start=folder)
for x in files if x != LOCAL_METADATA_FILE]
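
A self-contained usage sketch of the same idea. LOCAL_METADATA_FILE is given a made-up value here, and the exclusion is done on the basename, which is a slight variation on the path comparison above.

import os
import tempfile

LOCAL_METADATA_FILE = '.local_metadata'  # assumed name, not taken from the source

def list_relative_files(folder):
    # Walk the tree, join each file to its directory, then make the paths
    # relative to the starting folder, skipping the metadata file itself.
    walked = list(os.walk(folder))
    files = [os.path.join(root, name) for root, _, names in walked for name in names]
    return [os.path.relpath(path, start=folder)
            for path in files if os.path.basename(path) != LOCAL_METADATA_FILE]

with tempfile.TemporaryDirectory() as tmp:
    os.makedirs(os.path.join(tmp, 'sub'))
    for rel in ('a.txt', os.path.join('sub', 'b.txt'), LOCAL_METADATA_FILE):
        with open(os.path.join(tmp, rel), 'w') as fh:
            fh.write('x')
    print(sorted(list_relative_files(tmp)))  # ['a.txt', 'sub/b.txt'] on POSIX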
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def files_in_folder(folder):\n files = []\n for f in glob.glob(folder):\n if os.path.isdir(f):\n files.extend(files_in_folder(f + os.sep + \"**\"))\n else:\n files.append(f)\n return files",
"def ReadFolder(folder: str) -> List[str]:\n\n onlyfiles = [f for f in listdir(folder) if isfile(join(folder, f))]\n \n return onlyfiles",
"def list_file(self, path: str) -> List[FileName]:\n current_file = self.file_exists(path)\n if current_file is None:\n raise FileNotExistException(\n \"Path doesn't exist. Can't list non-existent path\"\n )\n if current_file.file_type != GOOGLE_FOLDER_TYPE:\n raise NotAFolderException(\"file_nod has to be a folder to list contents\")\n if current_file.children is None:\n raise NotAFolderException(\"file_nod has to be a folder to list contents\")\n if len(current_file.children) == 0:\n return []\n return [\n child_file.file_name for child_file in list(current_file.children.values())\n ]",
"def list_files_in_given_folder(path_to_folder):\r\n file_names_list = []\r\n for file_name in glob.glob(path_to_folder+\"/*\"):\r\n file_names_list.append(file_name)\r\n assert file_names_list != [], \"failed to populate folder\"+path_to_folder\r\n return file_names_list",
"def listfiles(self, *path):\n dir = self.localpath(*path)\n files = []\n for root, dirs, fnms in os.walk(dir):\n for f in fnms:\n if f[-5:] == '.info' and os.path.exists(os.path.join(root, f[:-5])):\n try:\n _open_file_info(os.path.join(root, f))\n files.append(\n path + tuple(_split_path(\n os.path.relpath(os.path.join(root, f[:-5]), start=dir)\n )))\n except ValueError:\n pass\n return files",
"def list_all_files(root):\n local_files = []\n for path, dirs, files in os.walk(os_path(root), followlinks=False):\n if len(files) > 0:\n path_wo_root = path[(len(root) + len(slash)):] # remove root part\n local_files.extend([os.path.join(path_wo_root, f) for f in files])\n return local_files",
"def list_files_recursively(\n api,\n query,\n parent,\n files=[],\n folder_name=\"\",\n):\n # type checking\n if isinstance(parent, sbg.models.project.Project):\n parent_id = parent.root_folder\n elif isinstance(parent, sbg.models.file.File):\n parent_id = parent.id\n\n if not folder_name:\n folder_name = Path(folder_name)\n for file in query.all():\n if not file.is_folder():\n file.metadata[\"parent_file_name\"] = folder_name\n files.append(file)\n else:\n folder_name = folder_name / file.name\n res = list_files_recursively(\n api,\n api.files.query(parent=file),\n folder_name=folder_name,\n parent=file,\n )\n folder_name = folder_name.parents[0]\n return files",
"def listFilesInDir(self, path, recursive=False, fileNameOnly=True, filter=None):\n self._checkActive()\n def _process(args, path, ttype, moddate=0, size=0, md5hash=\"\"):\n fileNameOnly, filter, pathsreturn = args \n if ttype == \"F\":\n if (filter is None) or fnmatch.fnmatch(path, filter):\n #fullpath=q.system.fs.joinPaths(path, fileNameOnly)\n if fileNameOnly:\n pathsreturn.append(q.system.fs.getBaseName(path))\n else:\n pathsreturn.append(path)\n pathsreturn=[]\n self.walk(_process, (fileNameOnly, filter, pathsreturn) , path, recursive=recursive) \n return pathsreturn",
"def get_files_by_folder(path):\n\n f = []\n for (dirpath, dirnames, filenames) in walk(path):\n f.extend(filenames)\n break\n return f",
"def get_files(folder_name: str) -> list:\r\n files = [f for f in os.listdir(os.path.join(os.getcwd(), folder_name))\r\n if os.path.isfile(os.path.join(os.getcwd(), folder_name, f))]\r\n return files",
"def get_file_list(file_or_folder, with_subfolders):\n if os.path.isfile(file_or_folder):\n return [file_or_folder] if is_processable_file(file_or_folder) else []\n elif os.path.isdir(file_or_folder):\n file_list = []\n if with_subfolders:\n for path, _, files in os.walk(file_or_folder):\n for name in files:\n if is_processable_file(name):\n file_list.append(os.path.join(path, name))\n else:\n for item in os.listdir(file_or_folder):\n if is_processable_file(item):\n candidate = os.path.join(file_or_folder, item)\n if os.path.isfile(candidate):\n file_list.append(candidate)\n return file_list\n else:\n return []",
"def get_files_from_directory(self, folder):\n return ['{}/{}'.format(folder, each) for each in os.listdir(folder) if each.endswith('.vm')]",
"def getFileList(folder_path):\n file_path_list = []\n if os.path.exists(folder_path):\n for path, _, files in os.walk(folder_path):\n if not files:\n continue\n for file in files:\n file_path_list.append(os.path.join(path, file))\n return file_path_list",
"def _list_files(folder, pattern):\n for root, folders, files in os.walk(folder):\n for filename in files:\n if fnmatch.fnmatch(filename, pattern):\n yield os.path.join(root, filename)",
"def _list_files(folder, pattern):\n for root, folders, files in os.walk(folder):\n for filename in files:\n if fnmatch.fnmatch(filename, pattern):\n yield os.path.join(root, filename)",
"def get_file_paths_recursive(folder=None, file_ext=None):\n file_list = []\n if folder is None:\n return file_list\n\n # for dir_path, dir_names, file_names in os.walk(folder):\n # for file_name in file_names:\n # if file_ext is None:\n # file_list.append(os.path.join(dir_path, file_name))\n # continue\n # if file_name.endswith(file_ext):\n # file_list.append(os.path.join(dir_path, file_name))\n file_list = [os.path.join(folder, f) for f in sorted(os.listdir(folder)) if f.endswith(file_ext)]\n\n return file_list",
"def get_files_in_dir(dir_path: str) -> List[FileInfo]:\n dir_walk_items = os.walk(dir_path)\n\n all_files = []\n for dir_walk_item in dir_walk_items:\n path_to_dir = dir_walk_item[0]\n file_names = dir_walk_item[2]\n for file_name in file_names:\n if file_name not in IGNORED_FILES:\n all_files.append(\n FileInfo.create(path_to_dir, file_name)\n )\n\n return all_files",
"def getFiles(folderToProcess,filter):\n\n print(f\"Parsing {folderToProcess} for {filter} files\")\n\n if debug:\n for path in Path(folderToProcess).rglob(filter):\n print(f\"Found {path}\")\n\n all_files = [str(x) for x in Path(folderToProcess).rglob(filter)] \n\n return all_files",
"def get_all_files_walk(folder):\n files = []\n for root, dirs, filenames in os.walk(folder):\n files.extend(os.path.join(root, f) for f in filenames)\n return files",
"def file_list(folder_path: List[str]) -> list:\n drive = _drive_gen()\n return _list_file(folder_path, drive)[1]",
"def _listdir(folder):\n\tfilePattern = r\"^\\d{4}\\-(0?[1-9]|1[012])\\-(0?[1-9]|[12][0-9]|3[01])\\-clipping\\-[\\d]*\\.json$\"\n\tfilenames = [f for f in os.listdir(folder) if re.match(filePattern, f)]\n\treturn filenames",
"def get_file_list(folder):\n\tfilelist = []\n\tfor file in os.listdir(folder):\n\t\tif file.endswith('.png'):\n\t\t\tfilelist.append(file)\n\treturn filelist",
"def folder(fpath):\n file_paths = glob.glob(fpath + '/*.dat')\n return list(file_paths)",
"def get_list_of_files_in_folder(\n self, folder_name: str, limit: int = 1\n ) -> List[str]:\n\n files = []\n if os.path.isdir(folder_name):\n # Get list of only html files from folder:\n files = [file for file in os.listdir(folder_name) if file.endswith(\".html\")]\n\n if len(files) < limit: # short dialogs\n return []\n\n # Descending sort to consider message order:\n files = sorted(\n files,\n key=lambda x: int(re.search(r\"messages(\\d+)\\.html\", x).group(1)),\n reverse=True,\n )\n else:\n print(f\"No such directory: {folder_name}\")\n return files",
"def get_file_list(rootdir): #{{{\n file_list = []\n for f in os.listdir(rootdir):\n if f == None or not f.endswith(\".csv\"):\n continue\n file_list.append(os.path.join(rootdir, f))\n \n return file_list",
"def list_files(file, folder, extension = '*.evtx'):\r\n if file:\r\n return [file]\r\n elif folder:\r\n return [ y for x in os.walk(folder) for y in glob(os.path.join(x[0], extension))]\r\n else:\r\n return []",
"def json_files_from_folder(folder: str) -> list:\n\n files = []\n for file_name in os.listdir(folder):\n splitted_filename = file_name.split(\".\")\n if splitted_filename[-1] == \"json\":\n files.append(file_name)\n return files",
"def __getFileList(self, path, filterRe):\n path = os.path.abspath(path)\n files = []\n for dirname, _, names in os.walk(path):\n files.extend([os.path.join(dirname, f)\n for f in names\n if re.match(filterRe, f)]\n )\n return files",
"def _get_files(self, path):\n result = []\n for f in os.listdir(path):\n if os.path.isdir(os.path.join(path, f)):\n result += self._get_files(os.path.join(path, f))\n else:\n result.append(os.path.join(path, f))\n return result",
"def parse_folder(self, path):\n\n data = []\n for filename in os.listdir(path):\n data.append(self.parse_file(os.path.join(path, filename), filename))\n return data"
] |
[
"0.66013783",
"0.6589332",
"0.6403268",
"0.63590515",
"0.6349886",
"0.6340192",
"0.6325134",
"0.62352633",
"0.61990255",
"0.61880994",
"0.618564",
"0.61627126",
"0.61410564",
"0.6138895",
"0.6138895",
"0.6132723",
"0.6122742",
"0.610428",
"0.60967124",
"0.6089745",
"0.6074586",
"0.6074562",
"0.60325915",
"0.60208607",
"0.6015542",
"0.6005542",
"0.6000987",
"0.5985674",
"0.5985037",
"0.5977303"
] |
0.81068176
|
0
|
Fetches the remote metadata file REMOTE_METADATA_FILE and returns the equivalent metadata dict.
|
def _fetch_current_remote_metadata(conn):
content = _get(conn, REMOTE_METADATA_FILE)
metadata = json.loads(content) if content else {}
return metadata
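
The parsing half of this can be sketched without any S3 connection; the only behaviour to preserve is falling back to an empty dict when the remote body is missing or empty. parse_remote_metadata is a hypothetical stand-in, not part of the original module.

import json

def parse_remote_metadata(content):
    # An empty or failed fetch (None / '') yields an empty metadata dict.
    return json.loads(content) if content else {}

print(parse_remote_metadata('{"style.css": "3f2a"}'))  # {'style.css': '3f2a'}
print(parse_remote_metadata(None))                     # {}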
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _get(conn, remote_file, bucket_name=BUCKET_NAME):\n contents = None\n try:\n reply = conn.get(bucket_name, remote_file)\n contents = reply.body\n if reply.http_response.status != 200:\n print 'Failed to fetch current_remote metadata'\n contents = None\n except:\n contents = None\n return contents",
"def _fetch_current_local_metadata():\n if not os.path.exists(LOCAL_METADATA_FILE):\n return {}\n\n with open(LOCAL_METADATA_FILE) as f:\n return json.loads(f.read())",
"def fetchPRIDEProject(remote_file:URIType, cachedFilename:AbsPath, secContext:Optional[SecurityContextConfig]=None) -> Tuple[Union[URIType, ContentKind], List[URIWithMetadata]]:\n \n parsedInputURL = parse.urlparse(remote_file)\n projectId = parsedInputURL.path\n metadata_url = parse.urljoin(PRIDE_PROJECTS_REST, projectId)\n \n metadata_array = [\n URIWithMetadata(remote_file, {'fetched': metadata_url})\n ]\n metadata = None\n try:\n metaio = io.BytesIO()\n _ , metametaio = fetchClassicURL(metadata_url, metaio)\n metadata = json.loads(metaio.getvalue().decode('utf-8'))\n metadata_array.extend(metametaio)\n except urllib.error.HTTPError as he:\n raise WFException(\"Error fetching PRIDE metadata for {} : {} {}\".format(projectId, he.code, he.reason))\n \n try:\n pride_project_url = metadata['_links']['datasetFtpUrl']['href']\n except Exception as e:\n raise WFException(\"Error processing PRIDE project metadata for {} : {}\".format(remote_file, e))\n \n return pride_project_url, metadata_array",
"def parse_remote_metadata(self, timeout=30):\n for metadataUrl in self.metadataUrls:\n if (\n metadataUrl[\"url\"] is not None and metadataUrl[\"format\"].lower() == \"text/xml\"\n ):\n try:\n content = openURL(metadataUrl[\"url\"], timeout=timeout, headers=self.headers, auth=self.auth)\n doc = etree.fromstring(content.read())\n\n if metadataUrl[\"type\"] == \"FGDC\":\n mdelem = doc.find(\".//metadata\")\n if mdelem is not None:\n metadataUrl[\"metadata\"] = Metadata(mdelem)\n else:\n metadataUrl[\"metadata\"] = None\n elif metadataUrl[\"type\"] in [\"TC211\", \"19115\", \"19139\"]:\n mdelem = doc.find(\n \".//\" + nspath_eval(\"gmd:MD_Metadata\", namespaces)\n ) or doc.find(\n \".//\" + nspath_eval(\"gmi:MI_Metadata\", namespaces)\n )\n if mdelem is not None:\n metadataUrl[\"metadata\"] = MD_Metadata(mdelem)\n else:\n metadataUrl[\"metadata\"] = None\n except Exception:\n metadataUrl[\"metadata\"] = None",
"def get_metadata(self, filename):\n return self.execute_json(filename)[0]",
"def get_metadata(self):\n\n\t\t#see redcap api documentation -- https://redcap.wustl.edu/redcap/srvrs/prod_v3_1_0_001/redcap/api/help/\n\t\tbuf = io.BytesIO()\n\n\t\tfields = {\n\t\t 'token': config['api_token'],\n\t\t 'content': 'metadata',\n\t\t 'format': 'json'\n\t\t}\n\n\t\tch = pycurl.Curl()\n\t\tch.setopt(ch.URL, config['api_url'])\n\t\tch.setopt(ch.HTTPPOST, list(fields.items()))\n\t\tch.setopt(ch.WRITEFUNCTION, buf.write)\n\t\tch.perform()\n\t\tch.close()\n\n\t\tmetadata = json.loads(buf.getvalue().decode())\n\t\tbuf.close()\n\t\treturn metadata",
"def clowder_file_metadata(session, url, fileid):\n try:\n ret = session.get(posixpath.join(url, \"api/files\", fileid, \"metadata.jsonld\"))\n except session.exceptions.RequestException as e:\n print(e)\n sys.exit(1)\n\n return ret",
"def get_downloads_metadata():\n global _METADATA\n if _METADATA is None:\n _METADATA = yaml.safe_load(resource_string(__name__, \"downloads.yml\"))\n return _METADATA",
"def read_metadata(self, file_in_cache):\n metadata_file = self.get_metadata_file(file_in_cache)\n if self.context.is_file(metadata_file):\n return json.loads(auto_decode(self.context.read_file(metadata_file)))\n else:\n return {}",
"def fetch_metadata(requests_impl=requests):\n\n print(f'fetching metadata at {Network.METADATA_URL}')\n return requests_impl.get(Network.METADATA_URL).json()",
"async def get_file_metadata(\n location_id: LocationID, file_id: StorageFileID, user_id: UserID\n):",
"def read_remote_file(org, repo, filename):\n import fsspec\n fs = fsspec.filesystem('github', org=org, repo=repo)\n\n with fs.open(filename) as f:\n data = loads(f.read())\n\n return data",
"def get_metadata(self):\n try:\n r = requests.get('https://login.mailchimp.com/oauth2/metadata', auth=self)\n except requests.exceptions.RequestException as e:\n raise e\n else:\n r.raise_for_status()\n output = r.json()\n if 'error' in output:\n raise requests.exceptions.RequestException(output['error'])\n return output",
"def _get_metadata(self, pkg_name):\n pkg_name = urllib.parse.quote(pkg_name, safe='@')\n if self.metadatas.get(pkg_name):\n return self.metadatas.get(pkg_name)\n else:\n url = urllib.parse.urljoin(self.REGISTRY, pkg_name)\n try:\n pkg_metadata = requests.get(url).json()\n self.metadatas[pkg_name] = pkg_metadata\n return pkg_metadata\n except urllib.error.HTTPError as e:\n print('Could not download {} from: {} with error: {}'. format(pkg_name, url, e.msg))\n exit(-1)",
"def get_metadata(self, resource_url):\n response = self.response(resource_url)\n body = response[0]\n return ResourceParser.extract_metadata(body)",
"def fetchZenodo(\n remote_file: \"URIType\",\n cachedFilename: \"AbsPath\",\n secContext: \"Optional[SecurityContextConfig]\" = None,\n) -> \"ProtocolFetcherReturn\":\n\n # TODO: implement support for access_token through security context\n\n # Dealing with an odd behaviour from urlparse\n for det in (\"/\", \"?\", \"#\"):\n if det in remote_file:\n parsedInputURL = urllib.parse.urlparse(remote_file)\n break\n else:\n parsedInputURL = urllib.parse.urlparse(remote_file + \"#\")\n parsed_steps = parsedInputURL.path.split(\"/\")\n\n if len(parsed_steps) < 1 or parsed_steps[0] == \"\":\n raise FetcherException(\n f\"{remote_file} is not a valid {ZENODO_SCHEME} CURIE. It should start with something like {ZENODO_SCHEME}:record_id\"\n )\n\n zenodo_id = parsed_steps[0]\n\n metadata_url = cast(\"URIType\", parse.urljoin(ZENODO_RECORD_REST, zenodo_id))\n\n gathered_meta = {\"fetched\": metadata_url}\n metadata_array = [URIWithMetadata(remote_file, gathered_meta)]\n try:\n metaio = io.BytesIO()\n _, metametaio, _ = fetchClassicURL(metadata_url, metaio)\n metadata = json.loads(metaio.getvalue().decode(\"utf-8\"))\n gathered_meta[\"payload\"] = metadata\n metadata_array.extend(metametaio)\n except urllib.error.HTTPError as he:\n raise FetcherException(\n f\"Error fetching Zenodo metadata for {zenodo_id} : {he.code} {he.reason}\"\n )\n\n if not isinstance(metadata, dict) or (metadata.get(\"conceptdoi\") is None):\n raise FetcherException(\n f\"Zenodo metadata for {zenodo_id} is inconsistent: {metadata}\"\n )\n\n zenodo_lic_id = metadata.get(\"metadata\", {}).get(\"license\", {}).get(\"id\")\n if zenodo_lic_id is None:\n raise FetcherException(\n f\"Zenodo metadata for {zenodo_id} is inconsistent: {metadata}\"\n )\n\n # Let's identify the licence of the contents\n licence_meta_url = cast(\n \"URIType\", parse.urljoin(ZENODO_LICENSE_REST, zenodo_lic_id)\n )\n\n gathered_l_meta = {\"fetched\": licence_meta_url}\n metadata_array.append(URIWithMetadata(remote_file, gathered_l_meta))\n try:\n metaio = io.BytesIO()\n _, metametalicio, _ = fetchClassicURL(licence_meta_url, metaio)\n l_metadata = json.loads(metaio.getvalue().decode(\"utf-8\"))\n gathered_l_meta[\"payload\"] = l_metadata\n metadata_array.extend(metametalicio)\n except urllib.error.HTTPError as he:\n raise FetcherException(\n f\"Error fetching Zenodo licence metadata {zenodo_lic_id} for {zenodo_id} : {he.code} {he.reason}\"\n )\n\n licence_url = l_metadata.get(\"metadata\", {}).get(\"url\")\n if licence_url is None:\n raise FetcherException(\n f\"Zenodo licence metadata {zenodo_lic_id} needed to describe {zenodo_id} is inconsistent: {l_metadata}\"\n )\n\n # When no URL, then the text should suffice\n if licence_url == \"\":\n licence_url = l_metadata[\"metadata\"].get(\"title\", zenodo_lic_id)\n\n # Let's select the contents\n kind: \"Optional[ContentKind]\" = None\n the_possible_files = metadata.get(\"files\", [])\n if len(parsed_steps) == 1:\n the_files = the_possible_files\n kind = ContentKind.Directory\n else:\n the_files = []\n prefix = \"/\".join(parsed_steps[1:])\n # Adjusting this properly\n if prefix[-1] == \"/\":\n prefix_slash = prefix\n prefix = prefix[0:-1]\n else:\n prefix_slash = prefix + \"/\"\n\n for the_file in the_possible_files:\n key = the_file.get(\"key\")\n if key is None:\n continue\n\n the_link = the_file.get(\"links\", {}).get(\"self\")\n if the_link is None:\n continue\n\n if key == prefix:\n the_files.append(the_file)\n kind = ContentKind.File\n break\n elif key.startswith(prefix_slash):\n the_files.append(the_file)\n 
kind = ContentKind.Directory\n\n if kind is None:\n raise FetcherException(\n f\"{remote_file} does not match contents from Zenodo entry {zenodo_id} (or entry has no associated file)\"\n )\n\n # Now, let's materialize the files\n try:\n if kind == ContentKind.Directory:\n os.makedirs(cachedFilename, exist_ok=True)\n for the_file in the_files:\n relpath = the_file[\"key\"]\n last_slash = relpath.rfind(\"/\")\n if last_slash != -1:\n the_file_local_dir = os.path.join(\n cachedFilename, relpath[0:last_slash]\n )\n os.makedirs(the_file_local_dir, exist_ok=True)\n\n the_file_local_path = cast(\n \"AbsPath\", os.path.join(cachedFilename, relpath)\n )\n _, metacont, _ = fetchClassicURL(\n the_file[\"links\"][\"self\"], the_file_local_path\n )\n metadata_array.extend(metacont)\n else:\n _, metacont, _ = fetchClassicURL(\n the_files[0][\"links\"][\"self\"], cachedFilename\n )\n metadata_array.extend(metacont)\n except urllib.error.HTTPError as he:\n raise FetcherException(\n f\"Error fetching Zenodo entry contents for {zenodo_id} : {he.code} {he.reason}\"\n )\n\n return ProtocolFetcherReturn(\n kind_or_resolved=kind,\n metadata_array=metadata_array,\n licences=(cast(\"URIType\", licence_url),),\n )",
"def read_metadata_file():\n metadata = None\n if not os.path.isfile(META_DATA_FILE):\n ppg.log_info(\"No metadata found. The earthquake splitting might have not been ran yet.\")\n else:\n ppg.log_info(\"Found metadata file\")\n metadata = pd.read_csv(META_DATA_FILE)\n return metadata",
"def getMetadata(samweb, filenameorid, locations=False):\n params = {}\n if locations: params['locations'] = True\n response = samweb.getURL(_make_file_path(filenameorid) + '/metadata', params=params)\n return convert_from_unicode(response.json())",
"def _load_metadata_from_asset():\n\n with rasterio.Env(AWS_NO_SIGN_REQUEST='YES',\n GDAL_DISABLE_READDIR_ON_OPEN='EMPTY_DIR'):\n with rasterio.open(href) as src:\n # Retrieve metadata stored in COG file\n metadata = src.profile\n metadata.update(src.tags())\n metadata['shape'] = src.shape\n\n # Retrieve COG CRS. Note: these COGs do not appear to have CRS info that can be\n # accessed via the .crs method. If this occurs assume it is in WGS84.\n # All COGs in AWS appear to be projected in WGS84.\n if src.crs is None:\n metadata['crs'] = rasterio.crs.CRS.from_epsg(4326)\n else:\n metadata['crs'] = src.crs\n\n # Compute bounding box, image footprint, and gsd\n bbox, footprint, metadata = _get_geometries(src, metadata)\n\n # Derive some additional metadata from the filename\n fname = os.path.basename(href)\n metadata = _parse_filename(fname, metadata)\n\n return metadata, bbox, footprint",
"def metadata(self):\n return parse_metadata(self.metadata_path())",
"async def fetch_metadata(self, route: str):\n data = await self.http.get_metadata(route)\n return data",
"async def get_remote_media_info(self, server_name: str, media_id: str) -> dict:\n if (\n self.federation_domain_whitelist is not None\n and server_name not in self.federation_domain_whitelist\n ):\n raise FederationDeniedError(server_name)\n\n # We linearize here to ensure that we don't try and download remote\n # media multiple times concurrently\n key = (server_name, media_id)\n async with self.remote_media_linearizer.queue(key):\n responder, media_info = await self._get_remote_media_impl(\n server_name, media_id\n )\n\n # Ensure we actually use the responder so that it releases resources\n if responder:\n with responder:\n pass\n\n return media_info",
"def remote(self, requests, file, remoteHost):\n # Set the source and dest paths\n remote_url = self.base_url + '/remote?file=' + file + \"&host=\" + remoteHost\n\n print(\"Making remote request: \" + remote_url)\n\n r = requests.get(remote_url, max_price=10)\n\n print(\"Remote request completed.\")\n\n return r.json()",
"def getFileMetadata( self, path ):\n res = self.__checkArgumentFormat( path )\n if not res['OK']:\n return res\n urls = res['Value']\n successful = {}\n failed = {}\n gLogger.debug( \"DIPStorage.getFileMetadata: Attempting to obtain metadata for %s files.\" % len( urls ) )\n serviceClient = RPCClient( self.url )\n for url in urls:\n pfn = url\n if url.find( self.url ) == 0:\n pfn = url[ ( len( self.url ) ):]\n res = serviceClient.getMetadata( pfn )\n if res['OK']:\n if res['Value']['Exists']:\n if res['Value']['Type'] == 'File':\n gLogger.debug( \"DIPStorage.getFileMetadata: Successfully obtained metadata for %s.\" % url )\n successful[url] = res['Value']\n else:\n failed[url] = 'Supplied path is not a file'\n else:\n failed[url] = 'File does not exist'\n else:\n gLogger.error( \"DIPStorage.getFileMetadata: Failed to get metdata for %s.\" % url, res['Message'] )\n failed[url] = res['Message']\n resDict = {'Failed':failed, 'Successful':successful}\n return S_OK( resDict )",
"def metadata_file(self):\n return self._metadata_file",
"async def _download_remote_file(\n self,\n server_name: str,\n media_id: str,\n ) -> dict:\n\n file_id = random_string(24)\n\n file_info = FileInfo(server_name=server_name, file_id=file_id)\n\n with self.media_storage.store_into_file(file_info) as (f, fname, finish):\n request_path = \"/\".join(\n (\"/_matrix/media/r0/download\", server_name, media_id)\n )\n try:\n length, headers = await self.client.get_file(\n server_name,\n request_path,\n output_stream=f,\n max_size=self.max_upload_size,\n args={\n # tell the remote server to 404 if it doesn't\n # recognise the server_name, to make sure we don't\n # end up with a routing loop.\n \"allow_remote\": \"false\"\n },\n )\n except RequestSendFailed as e:\n logger.warning(\n \"Request failed fetching remote media %s/%s: %r\",\n server_name,\n media_id,\n e,\n )\n raise SynapseError(502, \"Failed to fetch remote media\")\n\n except HttpResponseException as e:\n logger.warning(\n \"HTTP error fetching remote media %s/%s: %s\",\n server_name,\n media_id,\n e.response,\n )\n if e.code == twisted.web.http.NOT_FOUND:\n raise e.to_synapse_error()\n raise SynapseError(502, \"Failed to fetch remote media\")\n\n except SynapseError:\n logger.warning(\n \"Failed to fetch remote media %s/%s\", server_name, media_id\n )\n raise\n except NotRetryingDestination:\n logger.warning(\"Not retrying destination %r\", server_name)\n raise SynapseError(502, \"Failed to fetch remote media\")\n except Exception:\n logger.exception(\n \"Failed to fetch remote media %s/%s\", server_name, media_id\n )\n raise SynapseError(502, \"Failed to fetch remote media\")\n\n await finish()\n\n if b\"Content-Type\" in headers:\n media_type = headers[b\"Content-Type\"][0].decode(\"ascii\")\n else:\n media_type = \"application/octet-stream\"\n upload_name = get_filename_from_headers(headers)\n time_now_ms = self.clock.time_msec()\n\n # Multiple remote media download requests can race (when using\n # multiple media repos), so this may throw a violation constraint\n # exception. If it does we'll delete the newly downloaded file from\n # disk (as we're in the ctx manager).\n #\n # However: we've already called `finish()` so we may have also\n # written to the storage providers. This is preferable to the\n # alternative where we call `finish()` *after* this, where we could\n # end up having an entry in the DB but fail to write the files to\n # the storage providers.\n await self.store.store_cached_remote_media(\n origin=server_name,\n media_id=media_id,\n media_type=media_type,\n time_now_ms=self.clock.time_msec(),\n upload_name=upload_name,\n media_length=length,\n filesystem_id=file_id,\n )\n\n logger.info(\"Stored remote media in file %r\", fname)\n\n media_info = {\n \"media_type\": media_type,\n \"media_length\": length,\n \"upload_name\": upload_name,\n \"created_ts\": time_now_ms,\n \"filesystem_id\": file_id,\n }\n\n return media_info",
"def get_metadata(self, file_id):\n pass",
"def get_metadata(session, url, filelist):\n metadata = {}\n # Loop over the Clowder dataset image ID list\n for clowder_img in filelist.json():\n # Get metadata for the image from Clowder\n response = clowder_file_metadata(session, url, clowder_img['id'])\n # Metadata from multiple extractors may be present\n for extractor in response.json():\n # Find the extractor called \"deprecatedapi\" which refers to the API used to upload metadata\n if \"user_id\" in extractor['agent']:\n # Save a few metadata elements for convenience\n camera_type = extractor['content']['camera_type']\n perspective = extractor['content']['perspective']\n rotation_angle = extractor['content']['rotation_angle']\n # Store the image ID for later use\n extractor['img_id'] = clowder_img['id']\n if camera_type not in metadata:\n metadata[camera_type] = {}\n if perspective not in metadata[camera_type]:\n metadata[camera_type][perspective] = {}\n metadata[camera_type][perspective][rotation_angle] = extractor\n\n return metadata",
"def get_metadata(key=''):\n response, content = httplib2.Http().request(\n '%s/%s' % (METADATA_BASE_URL, key),\n headers={'Metadata-Flavor': 'Google'},\n method='GET',\n )\n if response['status'] == '404':\n raise NotFoundError(response, content)\n return content",
"def metadata_get(self, endpoint_name=None, key=None):\n if endpoint_name is None:\n _, body = self.request('/v1.1/endpoint/metadata/%s' % key, 'GET')\n else:\n _, body = self.request('/v1.1/endpoints/%s/metadata/%s' % (endpoint_name, key), 'GET')\n return body"
] |
[
"0.743946",
"0.6923274",
"0.638175",
"0.6271817",
"0.6228844",
"0.62113637",
"0.6208526",
"0.6199531",
"0.6125181",
"0.61230326",
"0.6048329",
"0.5846718",
"0.5833795",
"0.58308524",
"0.5800759",
"0.57391906",
"0.5727758",
"0.5712721",
"0.5698382",
"0.5687096",
"0.5684791",
"0.56756747",
"0.5666228",
"0.5661433",
"0.5652427",
"0.56100446",
"0.55873746",
"0.5584609",
"0.55752695",
"0.5550801"
] |
0.79775
|
0
|
Fetches the local metadata file LOCAL_METADATA_FILE and returns the equivalent metadata dict.
|
def _fetch_current_local_metadata():
if not os.path.exists(LOCAL_METADATA_FILE):
return {}
with open(LOCAL_METADATA_FILE) as f:
return json.loads(f.read())
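
A runnable sketch of the same local read, with the metadata file name made explicit as an assumption and a temporary directory standing in for the working directory.

import json
import os
import tempfile

def read_local_metadata(path):
    # Missing file means no metadata yet: return an empty dict instead of failing.
    if not os.path.exists(path):
        return {}
    with open(path) as fh:
        return json.load(fh)

with tempfile.TemporaryDirectory() as tmp:
    meta_path = os.path.join(tmp, '.local_metadata')  # hypothetical file name
    print(read_local_metadata(meta_path))             # {}
    with open(meta_path, 'w') as fh:
        json.dump({'a.txt': 'sha1-of-a'}, fh)
    print(read_local_metadata(meta_path))             # {'a.txt': 'sha1-of-a'}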
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def _fetch_current_remote_metadata(conn):\n content = _get(conn, REMOTE_METADATA_FILE)\n metadata = json.loads(content) if content else {}\n return metadata",
"def get_downloads_metadata():\n global _METADATA\n if _METADATA is None:\n _METADATA = yaml.safe_load(resource_string(__name__, \"downloads.yml\"))\n return _METADATA",
"def read_metadata(self, file_in_cache):\n metadata_file = self.get_metadata_file(file_in_cache)\n if self.context.is_file(metadata_file):\n return json.loads(auto_decode(self.context.read_file(metadata_file)))\n else:\n return {}",
"def get_metadata(self, filename):\n return self.execute_json(filename)[0]",
"def read_local_metadata(self, fld: str) -> Optional[str]:\n return self.read_metadata(self.get_obj_label(), fld)",
"def read_metadata_file():\n metadata = None\n if not os.path.isfile(META_DATA_FILE):\n ppg.log_info(\"No metadata found. The earthquake splitting might have not been ran yet.\")\n else:\n ppg.log_info(\"Found metadata file\")\n metadata = pd.read_csv(META_DATA_FILE)\n return metadata",
"async def get_file_metadata(\n location_id: LocationID, file_id: StorageFileID, user_id: UserID\n):",
"def read_metadata(\n filename: Union[Path, str], marker: str = \"---\", **kwargs: Any\n) -> Dict[str, Any]:\n return read_header(filename, marker, **kwargs)[0]",
"def metadata(self):\n return parse_metadata(self.metadata_path())",
"def read_data_from_file(self, local_lookml_project_path: str) -> dict:\n logger.info(\n \"Parsing data from local LookML file {}\".format(\n self.lookml_file_name_and_path\n )\n )\n with open(\n utils.assemble_path(\n local_lookml_project_path, self.lookml_file_name_and_path\n ),\n \"r\",\n ) as lookml_file:\n return lkml.load(lookml_file)",
"def _build_local_metadata_file(files, home=''):\n filepaths = [os.path.join(home, f) for f in files]\n shas = [_get_sha_metadata(f) for f in filepaths]\n metadata = dict(zip(files, shas))\n\n with open(LOCAL_METADATA_FILE, 'w') as f:\n f.write(json.dumps(metadata))",
"def _get_metadata(self, pkg_name):\n pkg_name = urllib.parse.quote(pkg_name, safe='@')\n if self.metadatas.get(pkg_name):\n return self.metadatas.get(pkg_name)\n else:\n url = urllib.parse.urljoin(self.REGISTRY, pkg_name)\n try:\n pkg_metadata = requests.get(url).json()\n self.metadatas[pkg_name] = pkg_metadata\n return pkg_metadata\n except urllib.error.HTTPError as e:\n print('Could not download {} from: {} with error: {}'. format(pkg_name, url, e.msg))\n exit(-1)",
"def fetch_metadata(requests_impl=requests):\n\n print(f'fetching metadata at {Network.METADATA_URL}')\n return requests_impl.get(Network.METADATA_URL).json()",
"def load_metainfo(filename, dependencyLoader=None, extraArgsHandling=InfoKindEl.ADD_EXTRA_ARGS, uri=None):\n path = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), \"../../../../nomad-meta-info/meta_info/nomad_meta_info/{}\".format(filename)))\n return loadJsonFile(path, dependencyLoader, extraArgsHandling, uri)",
"def metadata_file(self):\n return self._metadata_file",
"def _get(conn, remote_file, bucket_name=BUCKET_NAME):\n contents = None\n try:\n reply = conn.get(bucket_name, remote_file)\n contents = reply.body\n if reply.http_response.status != 200:\n print 'Failed to fetch current_remote metadata'\n contents = None\n except:\n contents = None\n return contents",
"def _get_metadata(self) -> Metadata:\n manifest = self._get_manifest()\n\n return Metadata(**manifest[\"metadata\"])",
"def _metadata_get(self, path):\n fd = self.fs.open(path, \"r\")\n # TODO iterate instead of assuming file < 4MB\n read_bytes = self.fs.read(fd, 0, 4096 * 1024)\n self.fs.close(fd)\n if read_bytes:\n return json.loads(read_bytes.decode())\n else:\n return None",
"def _load_metadata_from_asset():\n\n with rasterio.Env(AWS_NO_SIGN_REQUEST='YES',\n GDAL_DISABLE_READDIR_ON_OPEN='EMPTY_DIR'):\n with rasterio.open(href) as src:\n # Retrieve metadata stored in COG file\n metadata = src.profile\n metadata.update(src.tags())\n metadata['shape'] = src.shape\n\n # Retrieve COG CRS. Note: these COGs do not appear to have CRS info that can be\n # accessed via the .crs method. If this occurs assume it is in WGS84.\n # All COGs in AWS appear to be projected in WGS84.\n if src.crs is None:\n metadata['crs'] = rasterio.crs.CRS.from_epsg(4326)\n else:\n metadata['crs'] = src.crs\n\n # Compute bounding box, image footprint, and gsd\n bbox, footprint, metadata = _get_geometries(src, metadata)\n\n # Derive some additional metadata from the filename\n fname = os.path.basename(href)\n metadata = _parse_filename(fname, metadata)\n\n return metadata, bbox, footprint",
"def get_metadata (self, name):\n return self.metadata.get(name)",
"def metadata(self):\n if self._open is not None:\n self._init_metadata()\n return self._metadata[self._metadata_root]\n else:\n return None",
"def getlocalconfig(projroot: Path) -> Dict[str, Any]:\n localconfig: Dict[str, Any]\n try:\n with open(Path(projroot, 'config/localconfig.json')) as infile:\n localconfig = json.loads(infile.read())\n except FileNotFoundError:\n localconfig = {}\n return localconfig",
"def read_reference_data():\n return {f:read_local_file(f) for f in os.listdir(DATA_DIR)}",
"def read_local_file(filename):\n import fsspec\n fs = fsspec.filesystem('file')\n\n with fs.open(filename) as f:\n data = loads(f.read())\n\n return data",
"def load_metadata(self, directory: pathlib.Path) -> dict:\n path_to_metadata = directory / (self.name + \".json\")\n\n with open(path_to_metadata) as metadata_file:\n metadata = json.load(metadata_file)\n return metadata",
"def load_metadata(self, directory: pathlib.Path) -> dict:\n path_to_metadata = directory / (self.name + \".json\")\n\n with open(path_to_metadata) as metadata_file:\n metadata = json.load(metadata_file)\n return metadata",
"def load_metadata(self, directory: pathlib.Path) -> dict:\n path_to_metadata = directory / (self.name + \".json\")\n\n with open(path_to_metadata) as metadata_file:\n metadata = json.load(metadata_file)\n return metadata",
"def load(self) -> dict:\n if not os.path.exists(self.file_path):\n logger.error('Could not find meta file {}'.format(self.file_path))\n raise Exception()\n with open(self.file_path, encoding='utf-8') as meta_file:\n return json.loads(meta_file.read())",
"def clowder_file_metadata(session, url, fileid):\n try:\n ret = session.get(posixpath.join(url, \"api/files\", fileid, \"metadata.jsonld\"))\n except session.exceptions.RequestException as e:\n print(e)\n sys.exit(1)\n\n return ret",
"def extract_metadata(self):\n if self.is_generatable_file:\n logger.debug(\"Converting collected details to dict..\")\n if self.metadata_collector:\n self.metadata = MetadataToDict(\n metadata_collector=self.metadata_collector,\n file_import=self.file_import,\n )\n self.metadata.build_integration_dict()"
] |
[
"0.68818545",
"0.6417708",
"0.63122565",
"0.62646693",
"0.62405384",
"0.62042487",
"0.6080648",
"0.6028128",
"0.6001734",
"0.59823257",
"0.5974274",
"0.5929091",
"0.59028894",
"0.5869242",
"0.5845099",
"0.5817974",
"0.5808917",
"0.57907844",
"0.5776373",
"0.5760592",
"0.57372004",
"0.5726373",
"0.5669434",
"0.56670403",
"0.56494546",
"0.56494546",
"0.56494546",
"0.56370044",
"0.5634967",
"0.5623329"
] |
0.84643173
|
0
|
Based on a comparison of the local and remote metadata dictionaries, filter files to retain only those that do not exist in the remote metadata dict or that have the same filename but different content. Also filter the resulting file list based on IGNORE_DIRS and IGNORE_EXTENSIONS.
|
def _filter_file_list(files, local_metadata, remote_metadata):
def _is_tracked(filename, metadata):
"""
Is the filename tracked in the remote metadata dict.
The file may be not even locally tracked yet
"""
current_local_sha = local_metadata.get(filename, None)
current_remote_sha = metadata.get(filename, None)
return current_local_sha is not None \
and current_remote_sha is not None \
and current_local_sha == current_remote_sha
def _is_inside_ignored_dir(filename):
""" Is the filename inside any of the IGNORE_DIRS list """
ignore_dirs = ['./' + x for x in IGNORE_DIRS]
return any([filename.startswith(x) for x in ignore_dirs])
def _has_ignored_extension(filename):
return any([ext in IGNORE_EXTENSIONS
for ext in filename.split('.')[1:]])
files = [f for f in files
if not _is_inside_ignored_dir(f)
and not _has_ignored_extension(f)
and not _is_tracked(f, remote_metadata)]
return files
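A hedged usage sketch, assuming the function above is in scope; the IGNORE_DIRS / IGNORE_EXTENSIONS values, file paths and SHA strings are invented for this example (paths are './'-prefixed, matching how _is_inside_ignored_dir builds its prefixes).

IGNORE_DIRS = ['node_modules']
IGNORE_EXTENSIONS = ['map']

files = ['./css/site.css', './js/app.js', './node_modules/x.js', './js/app.js.map']
local_metadata = {'./css/site.css': 'sha-1', './js/app.js': 'sha-2'}
remote_metadata = {'./css/site.css': 'sha-1'}  # app.js has never been uploaded

print(_filter_file_list(files, local_metadata, remote_metadata))
# -> ['./js/app.js']: the unchanged file, the ignored-dir file and the ignored-extension file are dropped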
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def preprocess_raw_remote_files(self, raw_remote_files):\n return [xfile for xfile in raw_remote_files if not xfile.exists()]",
"def find_remote_files(product, date, channel, fs, mesoregion=None):\n if 'L1' in product:\n files = [fs.glob('gcp-public-data-goes-16/' + product + '/' + str(date.year) + '/' +\n '{0:03g}'.format(int(date.strftime('%j'))) +\n '/*/*{mesoregion}*M[36]C'.replace(\"{mesoregion}\", mesoregion) + str(channel) + '*.nc')]\n elif 'L2' in product:\n files = [fs.glob('gcp-public-data-goes-16/' + product + '/' + str(date.year) + '/' +\n '{0:03g}'.format(int(date.strftime('%j'))) +\n '/*/*{mesoregion}*'.replace(\"{mesoregion}\", mesoregion) + str(product) + '*M[36]' + '*.nc')]\n\n files = [y for x in files for y in x]\n\n return files",
"def _get_remote_files(config):\n if \"cache\" in config:\n return config[\"cache\"]\n out = {}\n for project, folder in _remote_folders(config):\n out.update(_project_files(project, folder))\n return out",
"def pre_combine_inventory(self, target, src_files):\n config = self.config\n\n self.stderr.write(f\"Layers detected: {self.layer_names_all}\\n\")\n if self.layer_names_all != self.layer_names_used:\n self.stderr.write(f\"Layers after filter: {self.layer_names_used}\\n\")\n\n # Convert src_files to a set to speed up\n src_files = set(src_files)\n self.target_extra_files = set()\n for (root, dirs, files) in relwalk(target, followlinks=config.follow_symlink):\n for fn in files:\n tgt_file = os.path.join(root, fn)\n if tgt_file not in src_files:\n if fn == CONTROLLED_DIR_MARKER or config.block_files.search(fn):\n continue # pragma: no cover (peephole optimization)\n self.target_extra_files.add(tgt_file)\n return src_files",
"def filter_missing_files(file_names, split_by_client=False, allow_missing_files=True):\n\n if not allow_missing_files:\n return file_names\n\n if split_by_client:\n # filter out missing files and empty clients\n existing_files = [\n [f for f in client_files if os.path.exists(f)] for client_files in file_names]\n existing_files = [\n client_files for client_files in existing_files if client_files]\n else:\n # filter out missing files\n existing_files = [f for f in file_names if os.path.exists(f)]\n return existing_files",
"def checkArchFiles(self, key = None, archName = None, verbose = False):\n\n # Set archive from passed args.\n if key is not None and archName is None:\n archName = self.nbDetails[key]['archName']\n elif key is None and archName is None:\n print('Skipping archive checks, no archive supplied.')\n return None\n\n # Check if file exists on remote\n # Note this returns a list\n archExists = self.checkFiles(archName)\n\n if archExists[0]:\n # Get arch contents from remote via Fabric.\n with self.c.prefix(f\"source {self.hostDefn[self.host]['condaPath']} {self.hostDefn[self.host]['condaEnv']}\"):\n result = self.c.run(f\"python -m zipfile -l {archName}\", hide = True)\n\n # Compare with local lsit\n # archFiles = result.stdout.splitlines()\n # localList = self.nbDetails[key]['pkgFileList'][5:]\n # fileComp = list(set(localList) - set(archFiles)) # Compare lists as sets\n archFiles = [(line.split()[0]) for line in result.stdout.splitlines()[1:]] # Keep file names only (drop header, and file properties)\n localList = self.nbDetails[key]['pkgFileList']\n\n # Test & set relative paths for local files in archive\n localListRel = []\n for fileIn in localList:\n try:\n localListRel.append(Path(fileIn).relative_to(self.hostDefn[self.host]['nbProcDir']).as_posix())\n except ValueError:\n localListRel.append(Path(fileIn).name) # In this case just take file name, will go in archive root\n\n fileComp = list(set(localListRel) - set(archFiles)) # Compare lists as sets\n\n # Results\n print(f\"\\n***Checking archive: {archName}\")\n print(f\"Found {len(archFiles)} on remote. Local list length {len(localList)}.\")\n\n # This will run if fileComp is not an empty list\n if fileComp:\n print(f\"Difference: {len(archFiles) - len(localList)}\")\n print(\"File differences:\")\n print(*fileComp, sep = '\\n')\n\n else:\n print(\"Local and remote file lists match.\")\n\n\n else:\n print(f\"***Missing archive: {archName}\")\n fileComp = None\n\n # Set fileComp\n # Either empty, None or list of differences.\n self.nbDetails[key]['archFileCheck'] = fileComp\n if fileComp:\n self.nbDetails[key]['archFilesOK'] = False\n elif fileComp is None:\n self.nbDetails[key]['archFilesOK'] = False\n else:\n self.nbDetails[key]['archFilesOK'] = True\n\n if verbose:\n print(\"\\n***Local file list:\")\n print(*localListRel, sep='\\n')\n print(\"\\n***Archive file list:\")\n print(*archFiles, sep='\\n')\n\n return localListRel, archFiles, fileComp, result",
"def _filter_mrpack_files(file_list: List[MrpackFile], mrpack_install_options: MrpackInstallOptions) -> List[MrpackFile]:\n filtered_list: List[MrpackFile] = []\n for file in file_list:\n if \"env\" not in file:\n filtered_list.append(file)\n continue\n\n if file[\"env\"][\"client\"] == \"required\":\n filtered_list.append(file)\n if file[\"env\"][\"client\"] == \"optional\" and file[\"path\"] in mrpack_install_options.get(\"optionalFiles\", []):\n filtered_list.append(file)\n\n return filtered_list",
"def org_diff(lst_dicts, media_type, main_server):\n diff_dict = {}\n # todo-me pull posters from connected servers\n\n for mtype in media_type:\n meta_lst = []\n seen = {}\n missing = []\n unique = []\n print('...combining {}s'.format(mtype))\n for server_lst in lst_dicts:\n for item in server_lst[mtype]:\n if mtype == 'movie':\n title = u'{} ({})'.format(item.title, item.year)\n else:\n title = item.title\n\n # Look for duplicate titles\n if title not in seen:\n seen[title] = 1\n meta_lst.append(get_meta(item))\n else:\n # Duplicate found\n if seen[title] >= 1:\n # Go back through list to find original\n for meta in meta_lst:\n if meta['title'] == title:\n # Append the duplicate server's name\n meta['server'].append(item._server.friendlyName)\n thumb_url = '{}{}?X-Plex-Token={}'.format(\n item._server._baseurl, item.thumb, item._server._token)\n meta['thumb'].append(thumb_url)\n seen[title] += 1\n # Sort item list by Plex rating\n # Duplicates will use originals rating\n meta_lst = sorted(meta_lst, key=lambda d: d['rating'], reverse=True)\n diff_dict[mtype] = {'combined': {\n 'count': len(meta_lst),\n 'list': meta_lst}}\n\n print('...finding {}s missing from {}'.format(\n mtype, main_server))\n for item in meta_lst:\n # Main Server name is alone in items server list\n if main_server not in item['server']:\n missing.append(item)\n # Main Server name is absent in items server list\n elif main_server in item['server'] and len(item['server']) == 1:\n unique.append(item)\n diff_dict[mtype].update({'missing': {\n 'count': len(missing),\n 'list': missing}})\n\n print('...finding {}s unique to {}'.format(\n mtype, main_server))\n diff_dict[mtype].update({'unique': {\n 'count': len(unique),\n 'list': unique}})\n\n return diff_dict",
"def files_unchanged(self):\n\n passed = []\n failed = []\n ignored = []\n fixed = []\n could_fix = False\n\n # Check that we have the minimum required config\n required_pipeline_config = {\"manifest.name\", \"manifest.description\", \"manifest.author\"}\n missing_pipeline_config = required_pipeline_config.difference(self.nf_config)\n if missing_pipeline_config:\n return {\"ignored\": [f\"Required pipeline config not found - {missing_pipeline_config}\"]}\n try:\n prefix, short_name = self.nf_config[\"manifest.name\"].strip(\"\\\"'\").split(\"/\")\n except ValueError:\n log.warning(\n \"Expected manifest.name to be in the format '<repo>/<pipeline>'. Will assume it is <pipeline> and default to repo 'nf-core'\"\n )\n short_name = self.nf_config[\"manifest.name\"].strip(\"\\\"'\")\n prefix = \"nf-core\"\n\n # NB: Should all be files, not directories\n # List of lists. Passes if any of the files in the sublist are found.\n files_exact = [\n [\".gitattributes\"],\n [\".prettierrc.yml\"],\n [\"CODE_OF_CONDUCT.md\"],\n [\"LICENSE\", \"LICENSE.md\", \"LICENCE\", \"LICENCE.md\"], # NB: British / American spelling\n [os.path.join(\".github\", \".dockstore.yml\")],\n [os.path.join(\".github\", \"CONTRIBUTING.md\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"bug_report.yml\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"config.yml\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"feature_request.yml\")],\n [os.path.join(\".github\", \"PULL_REQUEST_TEMPLATE.md\")],\n [os.path.join(\".github\", \"workflows\", \"branch.yml\")],\n [os.path.join(\".github\", \"workflows\", \"linting_comment.yml\")],\n [os.path.join(\".github\", \"workflows\", \"linting.yml\")],\n [os.path.join(\"assets\", \"email_template.html\")],\n [os.path.join(\"assets\", \"email_template.txt\")],\n [os.path.join(\"assets\", \"sendmail_template.txt\")],\n [os.path.join(\"assets\", f\"nf-core-{short_name}_logo_light.png\")],\n [os.path.join(\"docs\", \"images\", f\"nf-core-{short_name}_logo_light.png\")],\n [os.path.join(\"docs\", \"images\", f\"nf-core-{short_name}_logo_dark.png\")],\n [os.path.join(\"docs\", \"README.md\")],\n [os.path.join(\"lib\", \"nfcore_external_java_deps.jar\")],\n [os.path.join(\"lib\", \"NfcoreTemplate.groovy\")],\n ]\n files_partial = [\n [\".gitignore\", \".prettierignore\", \"pyproject.toml\"],\n ]\n\n # Only show error messages from pipeline creation\n logging.getLogger(\"nf_core.create\").setLevel(logging.ERROR)\n\n # Generate a new pipeline with nf-core create that we can compare to\n tmp_dir = tempfile.mkdtemp()\n\n # Create a template.yaml file for the pipeline creation\n template_yaml = {\n \"name\": short_name,\n \"description\": self.nf_config[\"manifest.description\"].strip(\"\\\"'\"),\n \"author\": self.nf_config[\"manifest.author\"].strip(\"\\\"'\"),\n \"prefix\": prefix,\n }\n\n template_yaml_path = os.path.join(tmp_dir, \"template.yaml\")\n with open(template_yaml_path, \"w\") as fh:\n yaml.dump(template_yaml, fh, default_flow_style=False)\n\n test_pipeline_dir = os.path.join(tmp_dir, f\"{prefix}-{short_name}\")\n create_obj = nf_core.create.PipelineCreate(\n None, None, None, no_git=True, outdir=test_pipeline_dir, template_yaml_path=template_yaml_path\n )\n create_obj.init_pipeline()\n\n # Helper functions for file paths\n def _pf(file_path):\n \"\"\"Helper function - get file path for pipeline file\"\"\"\n return os.path.join(self.wf_path, file_path)\n\n def _tf(file_path):\n \"\"\"Helper function - get file path for template file\"\"\"\n return 
os.path.join(test_pipeline_dir, file_path)\n\n # Files that must be completely unchanged from template\n for files in files_exact:\n # Ignore if file specified in linting config\n ignore_files = self.lint_config.get(\"files_unchanged\", [])\n if any([f in ignore_files for f in files]):\n ignored.append(f\"File ignored due to lint config: {self._wrap_quotes(files)}\")\n\n # Ignore if we can't find the file\n elif not any([os.path.isfile(_pf(f)) for f in files]):\n ignored.append(f\"File does not exist: {self._wrap_quotes(files)}\")\n\n # Check that the file has an identical match\n else:\n for f in files:\n try:\n if filecmp.cmp(_pf(f), _tf(f), shallow=True):\n passed.append(f\"`{f}` matches the template\")\n else:\n if \"files_unchanged\" in self.fix:\n # Try to fix the problem by overwriting the pipeline file\n shutil.copy(_tf(f), _pf(f))\n passed.append(f\"`{f}` matches the template\")\n fixed.append(f\"`{f}` overwritten with template file\")\n else:\n failed.append(f\"`{f}` does not match the template\")\n could_fix = True\n except FileNotFoundError:\n pass\n\n # Files that can be added to, but that must contain the template contents\n for files in files_partial:\n # Ignore if file specified in linting config\n ignore_files = self.lint_config.get(\"files_unchanged\", [])\n if any([f in ignore_files for f in files]):\n ignored.append(f\"File ignored due to lint config: {self._wrap_quotes(files)}\")\n\n # Ignore if we can't find the file\n elif not any([os.path.isfile(_pf(f)) for f in files]):\n ignored.append(f\"File does not exist: {self._wrap_quotes(files)}\")\n\n # Check that the file contains the template file contents\n else:\n for f in files:\n try:\n with open(_pf(f), \"r\") as fh:\n pipeline_file = fh.read()\n with open(_tf(f), \"r\") as fh:\n template_file = fh.read()\n if template_file in pipeline_file:\n passed.append(f\"`{f}` matches the template\")\n else:\n if \"files_unchanged\" in self.fix:\n # Try to fix the problem by overwriting the pipeline file\n with open(_tf(f), \"r\") as fh:\n template_file = fh.read()\n with open(_pf(f), \"w\") as fh:\n fh.write(template_file)\n passed.append(f\"`{f}` matches the template\")\n fixed.append(f\"`{f}` overwritten with template file\")\n else:\n failed.append(f\"`{f}` does not match the template\")\n could_fix = True\n except FileNotFoundError:\n pass\n\n # cleaning up temporary dir\n shutil.rmtree(tmp_dir)\n\n return {\"passed\": passed, \"failed\": failed, \"ignored\": ignored, \"fixed\": fixed, \"could_fix\": could_fix}",
"def default_filter(files):\n\n if '1.mkv' in files and '2.mkv' in files and 'Labels.json' in files:\n return True\n\n return False",
"def filter_filelist(files: list, hour_mod: int = 12, min_mod: int = 60) -> list:\n files_restricted = []\n if hour_mod == 0 and min_mod == 0:\n files_restricted.append(sorted(files)[-1])\n else:\n for file in files:\n hour = int(file.split(\"_\")[3][8:10])\n minute = int(file.split(\"_\")[3][10:12])\n if hour % hour_mod == 0 and minute % min_mod == 0:\n files_restricted.append(file)\n logging.debug(f'Remote file added: {file}')\n else:\n logging.debug(f'Remote file ignored: {file}')\n logging.info('Files to be downloaded has been reduced from {} to {}'.format(len(files), len(files_restricted)))\n return files_restricted",
"def search_local_files(filename, data_type, train_or_val, path, data_json):\n files = []\n\n if os.path.exists(path):\n all_files = os.listdir(path)\n for f in all_files:\n if f not in data_json.keys():\n continue\n if filename in f and data_type in f and train_or_val in f:\n dataset_filepath = os.path.join(path, f)\n local_file_md5 = get_file_md5(dataset_filepath)\n dataset_md5 = data_json[f][\"md5\"]\n if local_file_md5 == dataset_md5:\n files.append(f)\n else:\n print(f\"{f} is broken so that cannot partition from it.\")\n return files",
"def test_mirror_filter_packages_nomatch_package_with_spec(tmpdir):\n test_configuration = \"\"\"\\\n[blacklist]\npackages =\n example3>2.0.0\n\"\"\"\n Singleton._instances = {}\n with open(\"test.conf\", \"w\") as testconfig_handle:\n testconfig_handle.write(test_configuration)\n BandersnatchConfig(\"test.conf\")\n for plugin in filter_project_plugins():\n plugin.initialize_plugin()\n m = Mirror(str(tmpdir), mock.Mock())\n m.packages_to_sync = {\"example1\": None, \"example3\": None}\n m._filter_packages()\n assert \"example3\" in m.packages_to_sync.keys()",
"def test_syncer_sync_exclude(temp_data_dirs, syncer):\n tmp_source, tmp_target = temp_data_dirs\n\n syncer.sync_up(\n local_dir=tmp_source,\n remote_dir=\"/test/test_syncer_sync_exclude\",\n exclude=[\"*_exclude*\"],\n )\n syncer.wait()\n\n _download_from_fs_path(\n syncer.storage_filesystem, \"/test/test_syncer_sync_exclude\", tmp_target\n )\n\n # Excluded files should not be found in target\n assert_file(True, tmp_target, \"level0.txt\")\n assert_file(False, tmp_target, \"level0_exclude.txt\")\n assert_file(True, tmp_target, \"subdir/level1.txt\")\n assert_file(False, tmp_target, \"subdir/level1_exclude.txt\")\n assert_file(True, tmp_target, \"subdir/nested/level2.txt\")\n assert_file(False, tmp_target, \"subdir_nested_level2_exclude.txt\")\n assert_file(False, tmp_target, \"subdir_exclude/something/somewhere.txt\")",
"def filterFiles(groupDict, fileList):\n for fl in fileList:\n cleanFile = cleanUpPath(fl)\n dirsList = PurePath(fl).parts\n try:\n # Find the first libs directory.\n index = dirsList.index(\"libs\")\n # Any child of libs directory is a group.\n grp = dirsList[index + 1]\n groupDict[grp].append(cleanFile)\n except ValueError:\n groupDict[GRP_UNFILTERED].append(cleanFile)",
"def test_mirror_filter_packages_match(tmpdir):\n test_configuration = \"\"\"\\\n[blacklist]\nplugins = blacklist_project\npackages =\n example1\n\"\"\"\n Singleton._instances = {}\n with open(\"test.conf\", \"w\") as testconfig_handle:\n testconfig_handle.write(test_configuration)\n BandersnatchConfig(\"test.conf\")\n for plugin in filter_project_plugins():\n plugin.initialize_plugin()\n m = Mirror(str(tmpdir), mock.Mock())\n m.packages_to_sync = {\"example1\": None, \"example2\": None}\n m._filter_packages()\n assert \"example1\" not in m.packages_to_sync.keys()",
"def filter_captured_urls(urls_files, url_list_file):\n captured_urls = load_captured_urls(url_list_file)\n \n to_capture = list(filter(lambda d: d['url'] not in captured_urls, urls_files))\n \n return to_capture",
"def checkMissingFiles(inDir, jsonUrl):\n\n file_list = []\n remote = False\n try:\n file_list = os.listdir(inDir)\n except OSError:\n remote = True\n file_list = eos_ls(inDir)\n\n if file_list == []:\n print \"Directory does not exist or is empty!\"\n return []\n\n total_expected = 0\n missing_files = []\n suspicious_files = []\n recovered_files = []\n\n print 'Found %d files in input directory' % len(file_list)\n print 20*'-'\n\n jsonFile = open(jsonUrl,'r')\n procList = json.load(jsonFile,encoding = 'utf-8').items()\n\n for proc in procList:\n for desc in proc[1]:\n data = desc['data']\n isData = desc.get('isdata',False)\n mctruthmode = desc.get('mctruthmode')\n for d in data:\n dtag = d.get('dtag','')\n split = d.get('split',1)\n\n for segment in range(0,split):\n eventsFile = dtag\n if split > 1:\n eventsFile = dtag + '_' + str(segment)\n if mctruthmode:\n eventsFile += '_filt%d' % mctruthmode\n filename = eventsFile+'.root'\n\n sys.stdout.write('... checking %s' % filename)\n sys.stdout.flush()\n\n total_expected += 1\n\n if not filename in file_list:\n missing_files.append(filename)\n sys.stdout.write('\\033[91m MISSING \\033[0m \\n')\n # sys.stdout.flush()\n continue\n\n rootFileUrl = os.path.join(inDir, filename)\n if remote:\n rootFileUrl = ('root://eoscms//eos/cms/store' +\n rootFileUrl.split('store',1)[1])\n\n recovered, suspicious = False, False\n tfile = TFile.Open(rootFileUrl)\n try:\n if tfile.TestBit(TFile.kRecovered):\n recovered = True\n if tfile.IsZombie():\n suspicious = True\n tfile.Close()\n except AttributeError, ReferenceError:\n suspicious = True\n\n if recovered:\n sys.stdout.write('\\033[93m Recovered \\033[0m \\n')\n recovered_files.append(filename)\n if suspicious:\n sys.stdout.write('\\033[93m Failed to open \\033[0m \\n')\n suspicious_files.append(filename)\n\n sys.stdout.write('\\033[92m OK \\033[0m \\n')\n sys.stdout.flush()\n\n print 20*'-'\n if len(missing_files):\n print \"Missing the following files:\"\n print \"(%d out of %d expected)\"% (len(missing_files), total_expected)\n for filename in missing_files:\n print filename\n else:\n print \"NO MISSING FILES!\"\n print 20*'-'\n if len(suspicious_files):\n print \"Failed to open the following files:\"\n print \"(%d out of %d expected)\"% (len(suspicious_files), total_expected)\n for filename in suspicious_files:\n print filename\n print 20*'-'\n if len(recovered_files):\n print \"The following files are recovered:\"\n print \"(%d out of %d expected)\"% (len(recovered_files), total_expected)\n for filename in recovered_files:\n print filename\n print 20*'-'\n\n return missing_files+suspicious_files+recovered_files",
"def getFileListLocal(dataset,blacklist=[ ],tag=\"\"):\n if '/pnfs/' in dataset:\n tag += \"_pnfs\"\n dataset = '__'.join(dataset.split('/')[-3:])\n filename = \"filelist/filelist_%s%s.txt\"%(dataset.lstrip('/').replace('/','__'),tag)\n filelist = [ ]\n if os.path.exists(filename):\n with open(filename,'r') as file:\n for line in file:\n line = line.rstrip('\\n')\n if line and '#' not in line and line not in blacklist:\n filelist.append(line.rstrip('\\n'))\n return filelist",
"def filter_list(to_process_list):\n log_file_list = [file for file in to_process_list if \"tar\" not in file]\n tar_file_list = [file for file in to_process_list if \"tar\" in file]\n return log_file_list, tar_file_list",
"def diffInLocalFiles():\n\taddedFiles = listdir(globals.LOCAL_SHARED_FILE_SPACE)\t#aka current files\n\tremovedFiles = globals.LOCAL_FILE_LIST\t\t\t\t\t#aka previously recorded files\n\t#TODO: this can be a lot more efficient\n\t\n\t#record files that appear in both lists\n\tcommonFiles = []\n\tfor file in removedFiles:\n\t\tif file in addedFiles:\n\t\t\tcommonFiles.append(file)\n\t\t\t\n\t#remove files that appear in both lists\n\tfor file in commonFiles:\n\t\taddedFiles.remove(file)\n\t\tremovedFiles.remove(file)\n\t\t\n\t#The files remaining in the respective list were either recently added or removed\n\tmessages = []\n\tfor file in removedFiles:\n\t\tmessages.append((globals.REMOVE_FILE, file))\t#these files not longer exist\n\tfor file in addedFiles:\n\t\tmessages.append((globals.ADD_FILE, file))\t\t#these files have been recently added\n\n\t#redefine list of local files\n\tglobals.LOCAL_FILE_LIST = listdir(globals.LOCAL_SHARED_FILE_SPACE)\n\treturn messages",
"def filterImages(files, cfg):\r\n regex = \"\\.(\" + \"|\".join(cfg.image_formats) + \")$\"\r\n #filter(lambda s: re.match(regex, s), files)\r\n return [s for s in files if re.findall(regex, s)]",
"def filter_target_extensions(self, files_dict):\n files_filtered = defaultdict(list)\n supported_formats = self.sox_get_supported_formats()\n logging.info('Filtering audio files ...')\n paths = list(files_dict.keys())\n\n for path in paths:\n if not path.endswith('letmehear'):\n files = sorted(files_dict[path])\n for f in files:\n if os.path.splitext(f)[1].lstrip('.').lower() in supported_formats:\n files_filtered[path].append(f)\n return files_filtered",
"def list_backupable_files(files, config, file_filter):\n # For each file used by the application\n backupable_files = []\n for _filename in list(files):\n for filename in glob.glob(_filename):\n # print(filename)\n\n # ignore the user defined files\n if any(re.match(ignore, filename) for ignore in config.ignores):\n continue\n\n status = None\n # check for backuped files given from pipe:\n if filename in config.backuped_files:\n status = Status.EXISTS\n\n # If the file exists and is not already a link pointing to Original file\n if status is None:\n status = file_filter.get_status(filename)\n\n if status is None:\n status = Status.NOT_EXISTS\n\n backupable_files.append([status, filename])\n return backupable_files",
"def collect():\n\n # Get database.\n with open(local_directory(path='file_diffs/packages.json'), 'r') as f:\n store = json.load(f)\n\n # UI.\n print('Checking files for differences...\\n')\n\n # Iterate database.\n for package_name in store:\n # Package variables.\n package_dir = os.path.join(package_directory, package_name)\n package = store[package_name]\n\n # Recursive (lazy) package searching.\n if type(package) == str:\n package = os.path.expanduser(package)\n for dirpath, dirnames, filenames in os.walk(package):\n for filename in filenames:\n sub_package_dir = package_dir + dirpath.replace(package, '')\n if not os.path.exists(sub_package_dir):\n os.makedirs(sub_package_dir)\n\n fp_local = os.path.join(dirpath, filename)\n fp_remote = os.path.join(sub_package_dir, filename)\n\n cs_local = file_checksum(fp=fp_local)\n cs_remote = file_checksum(fp=fp_remote)\n\n if cs_remote != cs_local:\n print('Found: {}/{}'.format(package_name, filename))\n shutil.copyfile(src=fp_local, dst=fp_remote)\n\n # Manual package searching.\n if type(package) == list:\n for fp in package:\n fn_local = fp['local']\n fn_remote = fp['remote']\n\n fp_local = os.path.expanduser(fn_local)\n fp_remote = os.path.join(package_dir, fn_remote)\n\n cs_local = file_checksum(fp=fp_local)\n cs_remote = file_checksum(fp=fp_remote)\n\n if cs_remote != cs_local:\n print('Found: {}/{}'.format(package_name, fn_remote))\n\n remote_dir_path = '/'.join(fp_remote.split('/')[:-1])\n if not os.path.exists(remote_dir_path):\n os.makedirs(remote_dir_path)\n shutil.copyfile(src=fp_local, dst=fp_remote)",
"def compare_remote_elements(manifest_e1: Element, manifest_e2: Element,\n ignored_attrs: Set[str]) -> ChangeMap:\n return xml_diff.compare_subelements(\n tag='remote',\n p1=manifest_e1,\n p2=manifest_e2,\n ignored_attrs=ignored_attrs,\n key_fn=lambda x: x.get('name'),\n diff_fn=xml_diff.attribute_changes)",
"def _filter_diff(diff, include_list, exclude_list=()):\n filtered = []\n for d in diff:\n if (d.status != 'D' and\n _match_regex_list(d.file, include_list) and\n not _match_regex_list(d.file, exclude_list)):\n # We've got a match!\n filtered.append(d)\n return filtered",
"def localfiles_for_update(self, localfiles, obsfiles):\n upload_local_files = []\n obs_dict = {}\n for key, mtime, size in obsfiles:\n obs_dict[key.strip('/')] = mtime\n\n for localfile in localfiles:\n filepath, key = localfile\n fullkey = key + '/' + os.path.basename(filepath)\n fullkey = fullkey.strip('/')\n if fullkey in obs_dict.keys():\n localfile_timestamp = os.path.getmtime(filepath)\n obsfile_timestamp = time.mktime(time.strptime(obs_dict[fullkey], \"%Y/%m/%d %H:%M:%S\"))\n\n if localfile_timestamp > obsfile_timestamp:\n upload_local_files.append(localfile)\n else:\n upload_local_files.append(localfile)\n return upload_local_files",
"def _is_remote_reusable(inputs, calculation):\n can_use_remote = False\n #If no charge density file is available to restart from the calculation will except\n #with a not nice error message. So we can only reuse the charge density if these files are available\n retrieved_filenames = calculation.base.links.get_outgoing().get_node_by_label('retrieved').list_object_names()\n if any(file in retrieved_filenames for file in (\n 'cdn_last.hdf',\n 'cdn1',\n )):\n can_use_remote = True\n\n if 'fleurinp' in inputs:\n modes = inputs.fleurinp.get_fleur_modes()\n if modes['force_theorem'] or modes['dos'] or modes['band']:\n # in modes listed above it makes no sense copying cdn.hdf\n can_use_remote = False\n # without fleurinp it is harder to extract modes in this case\n # - simply try to reuse cdn.hdf and hope it works\n\n return can_use_remote",
"def prefilter_json_files_then_compare(args):\n\n logging.info(\"prefilter_json_files_then_compare: starting!\")\n with open(args.initialFile) as f:\n json_initial = file.read(f)\n with open(args.finalFile) as f2:\n json_final = file.read(f2)\n\n patch = jsonpatch.JsonPatch.from_diff(json_initial, json_final)\n logging.info(\n \"prefilter_json_files_then_compare:differences before patching: %d\",\n len(list(patch)),\n )\n\n json_initial_filtered = prefilter(json_initial, args.initial_prefilter)\n json_final_filtered = prefilter(json_final, args.finalPreFilter)\n\n patch_after_filtering = jsonpatch.JsonPatch.from_diff(\n json_initial_filtered, json_final_filtered\n )\n differences_after_patching = list(patch_after_filtering)\n logging.info(\n \"prefilter_json_files_then_compare: differences after patching: %d\",\n len(differences_after_patching),\n )\n\n if args.printDifferences:\n for patchline in differences_after_patching:\n print(json.dumps(patchline))\n\n print(len(differences_after_patching))\n return len(differences_after_patching)"
] |
[
"0.6446468",
"0.59767133",
"0.5730664",
"0.5725296",
"0.5656467",
"0.5532549",
"0.55086225",
"0.55084115",
"0.5492199",
"0.5476868",
"0.54651487",
"0.5445001",
"0.5409476",
"0.54082286",
"0.53627706",
"0.53226095",
"0.5322206",
"0.53167725",
"0.5312311",
"0.52992505",
"0.52837795",
"0.52400154",
"0.5230779",
"0.52141875",
"0.52138233",
"0.52003235",
"0.5195438",
"0.51881725",
"0.5178879",
"0.51775926"
] |
0.74489295
|
0
|
Is the filename tracked in the remote metadata dict? The file may not even be tracked locally yet.
|
def _is_tracked(filename, metadata):
current_local_sha = local_metadata.get(filename, None)
current_remote_sha = metadata.get(filename, None)
return current_local_sha is not None \
and current_remote_sha is not None \
and current_local_sha == current_remote_sha
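A behaviour sketch with made-up SHAs, assuming the function above is in scope; in the original code local_metadata is the dict from the enclosing _filter_file_list scope.

local_metadata = {'a.css': 'sha-1', 'b.js': 'sha-2'}
remote_metadata = {'a.css': 'sha-1', 'b.js': 'sha-OLD'}

print(_is_tracked('a.css', remote_metadata))   # True  - same hash on both sides
print(_is_tracked('b.js', remote_metadata))    # False - content changed since the last upload
print(_is_tracked('new.js', remote_metadata))  # False - not tracked remotely yet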
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def is_remote_cached(cls, target_filename):\n is_cached = None\n cache = cls.CACHE_BACKEND()\n for file_name, file_id in cache.search():\n if file_name == os.path.basename(target_filename):\n is_cached = file_id\n logger.debug('File %r already cached at %r', target_filename, cls.CACHE_BACKEND)\n break\n return is_cached",
"def is_local(self):\n try:\n return os.path.isfile(self.get_absolute_path())\n except ValueError:\n logger.error(\"'%s' is not a file\", self.get_absolute_path())\n except TypeError: # no datafile available or file does not exist\n pass\n return False",
"def _fileinfo_has_changed(self, metadata_filename, new_fileinfo):\n \n # If there is no fileinfo currently stored for 'metadata_filename',\n # try to load the file, calculate the fileinfo, and store it.\n if metadata_filename not in self.fileinfo:\n self._update_fileinfo(metadata_filename)\n\n # Return true if there is no fileinfo for 'metadata_filename'.\n # 'metadata_filename' is not in the 'self.fileinfo' store\n # and it doesn't exist in the 'current' metadata location.\n if self.fileinfo.get(metadata_filename) is None:\n return True\n\n current_fileinfo = self.fileinfo[metadata_filename]\n\n if current_fileinfo['length'] != new_fileinfo['length']:\n return True\n\n # Now compare hashes. Note that the reason we can't just do a simple\n # equality check on the fileinfo dicts is that we want to support the\n # case where the hash algorithms listed in the metadata have changed\n # without having that result in considering all files as needing to be\n # updated, or not all hash algorithms listed can be calculated on the\n # specific client.\n for algorithm, hash_value in new_fileinfo['hashes'].items():\n # We're only looking for a single match. This isn't a security\n # check, we just want to prevent unnecessary downloads.\n if hash_value == current_fileinfo['hashes'][algorithm]:\n return False\n\n return True",
"def has_metadata(self):\n if self.mimetype in Config.mimes_metadata:\n return True\n return False",
"def isBasedInHiddenFile(self):\n #type: () -> Optional[bool]\n return (\n None if self.realFileName is None #if before\n else self.realFileName != self.fileName\n )",
"def has_filename(self):\n if self.filename == \"untitled\":\n return False\n else:\n return True",
"def has_local_tails_file(self) -> bool:\n tails_file_path = Path(self.get_receiving_tails_local_path())\n return tails_file_path.is_file()",
"def exists(self):\n\n return os.path.exists(self[\"~filename\"])",
"def local(self):\r\n return self._url.scheme in ('', 'file')",
"def remote(self):\n return self.getItunesAttribute('Track Type') == 'Remote'",
"def check_for_file(self):\n if self.task.file_name in os.listdir(self.task.file_storage):\n return True\n return False",
"def file_exists(self):\n if os.path.isfile(self.file_name):\n return True\n else:\n return False",
"def file_present(self,imagefile=None):\n import hashlib\n if self.filesize()==0:\n return False # empty files are never present\n if imagefile==None:\n imagefile=self.imagefile # use this one\n for hashname in ['md5','sha1']:\n oldhash = self.tag(hashname)\n if oldhash:\n newhash = hashlib.new(hashname,self.contents(imagefile=imagefile)).hexdigest()\n return oldhash==newhash\n raise ValueError,\"Cannot process file \"+self.filename()+\": no hash in \"+str(self)",
"def has_file(self, name):\n return bool(self.input(name).__class__.__name__ == 'cgi_FieldStorage')",
"def is_local_file(string):\n assert isinstance(string, basestring)\n return os.path.isfile(string)",
"def is_new_file(self):\n return self.filename is None",
"def is_remote(path: Text) -> bool:\n\n # TODO(Alex): add check for another remote storages (s3, ...) when they will be supported\n if path.startswith('gs://'):\n return True\n\n return False",
"def is_local(self):\n if not \"COLLABORATIVE\" in self._file.upper():\n LOGGER.debug(['AIE4606', 'match_false'], {'file': self._file})\n return True\n else:\n LOGGER.debug(['AIE4607', 'match_true'], {'file': self._file})\n return False\n return self._is_local",
"def has_file(self) -> bool:\n return self._file is not None",
"def is_present(self):\n return self.file_is_present()",
"def _dist_has_meta_data(dist: pkg_resources.Distribution) -> bool:\n return dist.has_metadata('direct_url.json')",
"def is_file_exists(self):\n pass",
"def test_get_file_exists_caching_with_raw_url(self):\n repository = self.remote_repository\n\n self.spy_on(repository._get_file_exists_uncached,\n op=kgb.SpyOpReturn(True))\n\n # Use spy to put key into cache\n self.assertTrue(repository.get_file_exists('PATH', 'd7e96b3'))\n\n # Remove spy to ensure key is still in cache without needing spy\n repository._get_file_exists_uncached.unspy()\n self.assertTrue(repository.get_file_exists('PATH', 'd7e96b3'))\n\n # Does not exist when raw_file_url changed because it is not cached.\n repository.raw_file_url = \\\n 'http://github.com/api/v2/yaml/blob/show/reviewboard/<revision>'\n\n self.assertFalse(repository.get_file_exists('PATH', 'd7e96b3'))",
"def isfile(self):\n return os.path.isfile(self.path)",
"def names_singleton(self):\r\n if self.stream:\r\n return True\r\n else:\r\n return os.path.isfile(self.object_name)",
"def fileProcessed(self,fileInstance):\n if hasattr(fileInstance,\"name\"): name=fileInstance.name\n elif hasattr(fileInstance,\"url\"): name=fileInstance.url\n if name in self.emptyFileFlag: return self.emptyFileFlag[name]\n else: return False",
"def in_file(self):\n return self.on_disk and not self.in_cached_file",
"def exists(self):\n result = super().exists()\n if result:\n logger.debug(\"Found local file or directory %s\", self.path)\n else:\n logger.warning(\"Cannot find local file or directory %s\", self.path)\n return result",
"def __check_metadata(s3client, key, bucket_name):\n response = s3client.head_object(Bucket=bucket_name, Key=key)\n if 'status' in response['Metadata']:\n return response['Metadata']['status'] == 'uploaded'\n return False",
"def has_file(self, name):\n return name in self.files"
] |
[
"0.7122534",
"0.6666199",
"0.6614642",
"0.6477976",
"0.6251149",
"0.6231352",
"0.61951226",
"0.6183894",
"0.6110177",
"0.6092684",
"0.6037549",
"0.6031686",
"0.60248667",
"0.6003988",
"0.598465",
"0.59665877",
"0.59620124",
"0.594347",
"0.5927977",
"0.58981687",
"0.58882433",
"0.5848515",
"0.5846766",
"0.5826577",
"0.582648",
"0.5804992",
"0.5803461",
"0.58014953",
"0.57973117",
"0.57946914"
] |
0.811574
|
0
|
Is the filename inside any of the directories in the IGNORE_DIRS list?
|
def _is_inside_ignored_dir(filename):
ignore_dirs = ['./' + x for x in IGNORE_DIRS]
return any([filename.startswith(x) for x in ignore_dirs])
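A quick check with a hypothetical IGNORE_DIRS value, assuming the function above is in scope; paths are relative and './'-prefixed, matching how the prefixes are built.

IGNORE_DIRS = ['node_modules', 'tmp']

print(_is_inside_ignored_dir('./node_modules/lib.js'))  # True
print(_is_inside_ignored_dir('./css/site.css'))         # False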
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def ignore(ignored_dirs, path):\n return any([normpath(path).startswith(ignore_dir) for ignore_dir in ignored_dirs])",
"def ignore(directory):\n for pattern in ignore_patterns:\n if pattern in directory:\n return True\n\n return False",
"def _is_ignored(self, full_path):\n for ignor in self._ignored:\n if fnmatch.fnmatch(full_path, \"*/\" + ignor):\n return True\n return False",
"def ignore_patterns(self, relpath):\n names = relpath.split('/')\n for name in names:\n for pattern in self.ignore:\n if fnmatch.fnmatch(name, pattern):\n return True\n return False",
"def is_dir_ignored_file(file_name, cfg):\n if file_name:\n for pattern in cfg.options.dir_ignored_files:\n if fnmatch.fnmatch(file_name, pattern):\n return True\n return False",
"def check_if_file_is_ignored(file_path):\n path_parts = file_path.split('/')\n\n for part in path_parts:\n if part in INGORED_PATHS:\n return True",
"def _should_skip_file(path):\n for pattern in IGNORE_PATTERN_LIST:\n if pattern in path:\n return True\n\n return False",
"def is_ignored(file, ignored):\n return any(i in PurePath(path.abspath(file)).parts for i in ignored)",
"def dirname_filter ( self, dirname, _fnmatch=fnmatch.fnmatch ):\n return all (\n not _fnmatch ( dirname, pat ) for pat in self.DIRNAMES_IGNORE\n )",
"def dir_excluded(path):\n\tname = os.path.basename(path)\n\t# skip any dirs which start with . (dot) and in EXCLUDED_DIRS\n\tif name.startswith('.') and u'.*' in EXCLUDED_DIRS:\n\t\treturn True\n\t# skip any dirs in EXCLUDED_DIRS\n\tif name in EXCLUDED_DIRS or path in EXCLUDED_DIRS:\n\t\treturn True\n\t# skip any dirs that are found in reg exp checks including wildcard searches\n\tfound_dir = False\n\tfound_path = False\n\tfor d in EXCLUDED_DIRS:\n\t\tif d == '.*':\n\t\t\tcontinue\n\t\tif d.startswith('*') and d.endswith('*'):\n\t\t\td = d.replace('*', '')\n\t\t\tif re.search(d, name):\n\t\t\t\tfound_dir = True\n\t\t\t\tbreak\n\t\t\telif re.search(d, path):\n\t\t\t\tfound_path = True\n\t\t\t\tbreak\n\t\telif d.startswith('*'):\n\t\t\td = d + '$'\n\t\t\tif re.search(d, name):\n\t\t\t\tfound_dir = True\n\t\t\t\tbreak\n\t\t\telif re.search(d, path):\n\t\t\t\tfound_path = True\n\t\t\t\tbreak\n\t\telif d.endswith('*'):\n\t\t\td = '^' + d\n\t\t\tif re.search(d, name):\n\t\t\t\tfound_dir = True\n\t\t\t\tbreak\n\t\t\telif re.search(d, path):\n\t\t\t\tfound_path = True\n\t\t\t\tbreak\n\t\telse:\n\t\t\tif d == name:\n\t\t\t\tfound_dir = True\n\t\t\t\tbreak\n\t\t\telif d == path:\n\t\t\t\tfound_path = True\n\t\t\t\tbreak\n\n\tif found_dir or found_path:\n\t\treturn True\n\n\treturn False",
"def included(path):\n if path.endswith(Env.IGNORED_TEST_DIRS):\n return False\n return path.endswith('.py') or os.path.isdir(path)",
"def _ignore(path):\n return any(re.match(pattern, path) for pattern in ignore)",
"def test_file_paths(self, site):\n \n if site.home_page.contains_any_pattern(\n ['/etc/designs/','/libs/cq/', '/libs/wcm/', '/content/dam/']\n ):\n return 1\n else:\n return 0",
"def FilterDirectory(dirpath, filenames):\n if not dirpath or not filenames:\n return False\n for no_crawl_dir in NO_CRAWL_DIRS:\n if no_crawl_dir in dirpath:\n return False\n return True",
"def should_ignore_path(path):\n for p in config.compiled_ignore_patterns:\n if p.match(path):\n return True\n return False",
"def exclude_filter(path):\n for ignore in IGNORE:\n if fnmatch(path, osp.join(SRC, ignore)): # in ignore list\n return True\n else:\n if osp.isdir(path) or osp.splitext(path)[1] != '.md':\n return False\n with open(path) as f:\n firstline = f.readline()\n return firstline.startswith('```{include}') # duplicate file",
"def test_find_not_should_ignore_path_regexp(self, tmp_path):\n plugin_folder_path = populate_dir(tmp_path)\n\n detected_files = set()\n should_ignore_files = {\n \"test_notload.py\",\n \"test_notload_sub.py\",\n \"test_noneload_sub1.py\",\n \"test_shouldignore.py\",\n \".airflowignore_glob\",\n }\n should_not_ignore_files = {\n \"test_load.py\",\n \"test_load_sub1.py\",\n }\n ignore_list_file = \".airflowignore\"\n for file_path in find_path_from_directory(plugin_folder_path, ignore_list_file):\n file_path = Path(file_path)\n if file_path.is_file() and file_path.suffix == \".py\":\n detected_files.add(file_path.name)\n assert detected_files == should_not_ignore_files\n assert detected_files.isdisjoint(should_ignore_files)",
"def is_in_directory(f):\n f = os.path.dirname(f) + os.path.sep\n return any(f.startswith(d) for d in dirs_to_group)",
"def _is_in_excluded_patterns(self, path):\n for excluded_pattern in self._excluded_patterns:\n expandeduser_pattern = os.path.expanduser(excluded_pattern)\n if (path + '/').find(expandeduser_pattern) != -1:\n return True\n return False",
"def folder_filter(folder_name):\n excluded_folders = get_setting('excluded_folders', [])\n folder_name = folder_name.rstrip(os.sep) + os.sep\n return True not in [exc in folder_name for exc in excluded_folders]",
"def contains_files(self):\n if self.file_list is None:\n self._set_file_list()\n for individual_file in self.file_list:\n if not os.path.exists(os.path.join(self.base_dir, individual_file)):\n return False\n return True",
"def path_excluded(self,path):\n\t\tfor pattern in self.excludes['file_exclude']:\n\t\t\tif pattern in path:\n\t\t\t\t#print \" \u001b[41mExcluding:\u001b[m\",path\n\t\t\t\treturn True\n\t\treturn False",
"def ignore_path(path, ignore_list=None, whitelist=None):\n if ignore_list is None:\n return True\n\n should_ignore = matches_glob_list(path, ignore_list)\n if whitelist is None:\n return should_ignore\n\n return should_ignore and not matches_glob_list(path, whitelist)",
"def skip(*filenames):\r\n for filename in filenames:\r\n if not os.path.isfile(filename):\r\n return False\r\n return True",
"def test_find_not_should_ignore_path_glob(self, tmp_path):\n plugin_folder_path = populate_dir(tmp_path)\n\n detected_files = set()\n should_ignore_files = {\n \"test_notload.py\",\n \"test_notload_sub.py\",\n \"test_noneload_sub1.py\",\n \"test_shouldignore.py\",\n }\n should_not_ignore_files = {\n \"test_load.py\",\n \"test_load_sub1.py\",\n }\n ignore_list_file = \".airflowignore_glob\"\n for file_path in find_path_from_directory(plugin_folder_path, ignore_list_file, \"glob\"):\n file_path = Path(file_path)\n if file_path.is_file() and file_path.suffix == \".py\":\n detected_files.add(file_path.name)\n assert detected_files == should_not_ignore_files\n assert detected_files.isdisjoint(should_ignore_files)",
"def _include_directory(self, root_parts):\n # include root\n if len(root_parts) == 0:\n return True\n\n # don't include lwc tests\n if root_parts[0] == \"lwc\" and any(part.startswith(\"__\") for part in root_parts):\n return False\n\n # include everything else\n return True",
"def in_folder(self):\n return len(os.path.split(self.file_path)) > 1",
"def blocks(self, dirs):\n return any([d in self.directories for d in dirs])",
"def checkForFile(self, filename:str):\n\t\tfor item in os.listdir(self.getPath()):\n\t\t\tif filename in item:\n\t\t\t\treturn True\n\t\treturn False",
"def dir_filter(item):\n return not item.startswith(\"_\")"
] |
[
"0.7432789",
"0.742647",
"0.7269846",
"0.7173609",
"0.7124053",
"0.6995675",
"0.69607145",
"0.6938394",
"0.69038445",
"0.68344337",
"0.6772172",
"0.6730275",
"0.6686163",
"0.65734845",
"0.6516576",
"0.6510022",
"0.6423632",
"0.6392135",
"0.637968",
"0.6378461",
"0.63330704",
"0.6315168",
"0.6250827",
"0.6184499",
"0.6183902",
"0.61769605",
"0.61649156",
"0.61556274",
"0.6104933",
"0.6100495"
] |
0.81426775
|
0
|
Walks through all the subfolders in static_root and uploads every valid file found to S3. If gzip is enabled, it also tries to compress and upload a compressed version of each static asset.
|
def upload_all_to_s3(static_root):
conn = _get_connection()
files = _get_file_list(static_root)
_build_local_metadata_file(files, home=static_root)
local_metadata = _fetch_current_local_metadata()
remote_metadata = _fetch_current_remote_metadata(conn)
files_to_upload = _filter_file_list(files, local_metadata, remote_metadata)
start_time = time.time()
print 'Upload start: Landing in BUCKET_NAME: %s' % BUCKET_NAME
for f in files_to_upload:
#Upload to Bucket
upload_file(conn, os.path.join(static_root, f), f)
#Upload Gzip css/js version if gzip is enabled
can_be_gzipped = _file_can_be_compressed(os.path.join(static_root, f))
if GZIP_ENABLED and can_be_gzipped:
upload_file(conn, os.path.join(static_root, f), f, gzip=True)
#Extra files
if EXTRA_FILES:
print 'Now, uploading extra files outside public/static'
for filename_local, filename_s3 in EXTRA_FILES.items():
upload_file(conn, filename_local, filename_s3)
end_time = time.time()
print 'Upload finished: \
Time elapsed: %s s' % round(end_time - start_time, 3)
# refresh metadata file on the server
print 'Uploading local metadata file'
upload_file(conn, LOCAL_METADATA_FILE, REMOTE_METADATA_FILE)
print 'Uploading process DONE'
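A hypothetical invocation sketch; STATIC_ROOT and every helper and constant used above (BUCKET_NAME, GZIP_ENABLED, EXTRA_FILES, upload_file, the metadata helpers) are assumed to live in the same deployment module.

if __name__ == '__main__':
    STATIC_ROOT = 'public/static'  # hypothetical local build output directory
    upload_all_to_s3(STATIC_ROOT)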
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def s3_sync(s3_bucket, s3_prefix, sync_path=\".\"):\n # Get bucket\n s3_resource = boto3.resource(\"s3\")\n bucket = s3_resource.Bucket(s3_bucket)\n\n # Walk paths and subdirectories, uploading files\n for path, subdirs, files in os.walk(sync_path):\n # Get relative path prefix\n relpath = os.path.relpath(path, sync_path)\n if not relpath.startswith('.'):\n prefix = os.path.join(s3_prefix, relpath)\n else:\n prefix = s3_prefix\n\n for file in files:\n file_key = os.path.join(prefix, file)\n bucket.upload_file(os.path.join(path, file), file_key)",
"def upload_child_objects(self, local_dir_path, s3_dir_path, recursive=False, fn_pattern=None):\n child_objects = [os.path.join(local_dir_path, f) for f in os.listdir(local_dir_path)]\n child_files = [f for f in child_objects if os.path.isfile(f)]\n child_dirs = [f for f in child_objects if os.path.isdir(f)]\n\n for child_file in child_files:\n if not fn_pattern or fnmatch.fnmatch(child_file, fn_pattern):\n s3_object_path = os.path.join(s3_dir_path, os.path.basename(child_file))\n logging.debug(\"Uploading \\\"{}\\\" to \\\"{}\\\"\".format(child_file, s3_object_path))\n self.upload_object(child_file, s3_object_path)\n\n if recursive:\n for child_dir_local in child_dirs:\n child_dir_s3 = os.path.join(s3_dir_path, os.path.basename(child_dir_local))\n self.upload_child_objects(child_dir_local, child_dir_s3, recursive, fn_pattern)",
"def copy_static_resources(self):\n if not hasattr(settings, 'STATIC_ROOT'):\n raise MissingStaticRoot()\n destination = os.path.join(STORAGE_PATH, 'static')\n if os.path.exists(destination):\n shutil.rmtree(destination)\n shutil.copytree(settings.STATIC_ROOT, destination)",
"def cp_static_files(self,inpath,outpath): \n if inpath==self.static_dir:\n dest=os.path.join(outpath,os.path.basename(inpath))\n if os.path.exists(dest):\n logger.warning('Remove old static folder')\n shutil.rmtree(dest) #not efficient. Should do it incrementaly...\n logger.info('cp_static_files %s -> %s' %(inpath,dest))\n copyfiles(inpath,dest) \n else:\n for folder in os.listdir(inpath):\n if folder == 'static':\n logger.info('found static folder, copy all...')\n dest=os.path.join(outpath,folder)\n src=os.path.join(inpath,folder)\n if os.path.exists(dest):\n logger.warning('Remove old static folder')\n shutil.rmtree(dest) #not efficient. Should do it incrementaly...\n logger.info('cp_static_files %s -> %s' %(src,dest))\n copyfiles(src,dest)\n return 0",
"def deploy_static(): \n from fabdeploy.django import collectstatic as django_collectstatic\n# run(\"rm -rf %(root_path)s%(project_name)s/static/*\" % env) # call again git_add_commit_pull\n django_collectstatic()",
"def copy_static(root_directory, dist_directory, sdk_directory):\n\n for static in configuration.STATICS:\n context = {\n \"root\": root_directory,\n \"sdk\": sdk_directory,\n \"dist\": dist_directory\n }\n\n source = templates.from_string(static[\"source\"], context)\n target = templates.from_string(static[\"target\"], context)\n target = os.path.join(dist_directory, target)\n\n # Perform the action.\n sys.stdout.write(\"Copying '%s'\\n\" % source)\n\n if static[\"type\"] == \"directory\":\n recursive_overwrite(source, target)\n else:\n shutil.copy(source, target)",
"def upload_images_to_s3(directory):\n for f in directory.iterdir():\n if str(f).endswith(('.png', '.jpg', '.jpeg')):\n full_file_path = str(f.parent) + \"/\" + str(f.name)\n file_name = str(f.name)\n s3_client.upload_file(full_file_path, BASE_BUCKET, file_name)\n print(f,\"put\")",
"def deploy_to_s3():\n env.gzip_path = '%(path)s/repository/gzip/assets/' % env\n run(('s3cmd -P --add-header=Content-encoding:gzip --guess-mime-type --rexclude-from=%(path)s/repository/s3exclude sync %(gzip_path)s s3://%(s3_bucket)s/%(project_name)s/') % env)",
"def upload_json_to_s3(directory):\n for f in directory.iterdir():\n if str(f).endswith('.json'):\n full_file_path = str(f.parent) + \"/\" + str(f.name)\n file_name = str(f.name)\n s3_client.upload_file(full_file_path, BASE_BUCKET, file_name)",
"def _upload_dir_to_bucket(self, path, ext_path):\n for file in os.listdir(path):\n self._upload_to_bucket(path+'/'+file, ext_path+'/'+file)",
"def upload_to_s3(site, bucket, directory=None, files=None, prefix=None):\n if bucket is None:\n print red('Error: Bucket must be specified.')\n return\n if directory is None and files is None:\n print red('Error: Directory and/or files must be specified.')\n return\n # Setup boto\n import boto\n from boto.s3.bucket import Bucket\n from boto.s3.key import Key\n import mimetypes\n import fnmatch\n\n setup_aws_access_key(site)\n\n # Connect to S3\n c = boto.connect_s3()\n b = Bucket(c, bucket)\n\n # Fix the prefix\n # prefix itself shouldn't have a / prefix itself but should end with /\n if prefix:\n prefix = prefix.lstrip('/')\n if prefix and not prefix.endswith('/'):\n prefix = prefix + '/'\n\n def __upload(key, filename):\n k = Key(b)\n k.key = key\n headers = {}\n content_type = mimetypes.guess_type(filename)[0]\n if site.has_key('webapp') and site['webapp'].get('cache_control'):\n for pattern in site['webapp']['cache_control']:\n if fnmatch.fnmatch(filename, pattern):\n headers['Cache-Control'] = site['webapp']['cache_control'][pattern]\n break\n if site.has_key('webapp') and site['webapp'].get('gzip_types') and content_type in site['webapp']['gzip_types']:\n from gzip import GzipFile\n from StringIO import StringIO\n # Need to specify content_type when uploading from a string!\n headers['Content-Type'] = content_type\n headers['Content-Encoding'] = 'gzip'\n s = StringIO()\n g = GzipFile(fileobj=s, mode='wb')\n with open(filename, 'rb') as f:\n g.write(f.read())\n g.close()\n k.set_contents_from_string(s.getvalue(), headers)\n else:\n k.set_contents_from_filename(filename, headers)\n\n if files:\n # Upload individual files\n if directory:\n keys = [filename.lstrip('/') for filename in files]\n files = [os.path.join(directory, filename) for filename in files]\n else:\n keys = [os.path.split(filename)[1] for filename in files]\n for i, filename in enumerate(files):\n print 'Uploading %s' % keys[i]\n if prefix:\n key = prefix + keys[i]\n else:\n key = keys[i]\n __upload(key, filename)\n elif directory:\n # Upload an entire directory\n def __upload_dir(arg, dirname, names):\n # arg is the starting directory\n for name in names:\n filename = os.path.join(dirname, name)\n if not os.path.isdir(filename) and not os.path.islink(filename) and not name.startswith('.'):\n key = filename[len(arg):]\n if key.startswith('/'):\n key = key[1:]\n if prefix:\n key = prefix + key\n print 'Uploading %s' % key\n __upload(key, filename)\n os.path.walk(directory, __upload_dir, directory)",
"def add_dirs_to_static(static_webapp_name):\n static_dir = '$HOME/webapps/%s' % static_webapp_name\n with settings(warn_only=True):\n with cd(static_dir):\n run(\"mkdir static && mkdir media\")\n run(\"rm index.html\")\n run(\"touch index.html\")\n with cd(code_dir):\n run(\"mkdir %s/static\" % project_name)",
"def upload_handler(self):\n \n for root, dirs, files in os.walk(self.path):\n\n current_dir = os.path.basename(root)\n \n if root == self.path:\n root_id = self.gapy.create_file(current_dir, path=root, isFolder=True)\n else:\n parents_id = self.filesystem[os.path.dirname(root)][\"id\"]\n root_id = self.gapy.create_file(current_dir, path=root, isFolder=True, parents_id=[parents_id])\n print(f\"\\033[94m The directory {current_dir} was uploaded \\033[0m\")\n\n self.filesystem[root.rstrip(\"/\")] = { \"id\": root_id, \"files\": [] }\n \n if files:\n for f in files:\n if f not in IGNORE_FILES and os.path.getsize(root+\"/\"+f) > 0:\n file_id = self.gapy.create_file(f, path=root, parents_id=[root_id])\n self.filesystem[root][\"files\"].append({ \"name\": f, \"id\": file_id})\n print(f\"\\033[94m The file {f} was uploaded \\033[0m\")\n \n self.update_fs()",
"def test_upload_directory_of_directories_to_s3_bucket(self):\n conn = boto3.resource('s3', region_name='us-east-1')\n # We need to create the bucket since this is all in Moto's 'virtual' AWS account\n conn.create_bucket(Bucket='foobucket')\n\n s3_connector = S3Connector()\n s3_connector.connect(\"default\")\n s3_connector.upload_directory(directory_path=\"test/test_resources/test_subdirectory\",\n bucket_name=\"foobucket\", aws_directory=\"test_directory\")\n\n # get bucket contents\n response = boto3.client('s3').list_objects(Bucket=\"foobucket\")\n contents = []\n for content in response.get('Contents', []):\n contents.append(content.get('Key'))\n\n self.assertEqual(\n contents, [\"test_directory/sub/fake\", \"test_directory/sub2/fake\"])",
"def publish():\n reset()\n compress()\n build()\n s3deploy()\n log_success()",
"def update_static_files(self):\n\n params = self.chose_param_value(\"--static\")\n self._check_path_availability([\"get_static_dir\", \"get_static_dir_to\"])\n if self._check_whether_has_params(params):\n self.updater.update_files(\n self.analizer.get_static_dir(),\n self.analizer.get_static_dir_to(),\n params\n )\n return self.write_debug_message(\"Static files upgrade is done!\\n\")\n return self.write_error_message(\"You haven't passed any params about static files\")",
"def upload_files_s3(files, bucket):\n \n print('************************************')\n print('Uploading files to s3 bucket...')\n print('************************************')\n \n for i in range(len(files)):\n upload_file_s3(files[i], bucket)\n \n print('************************************')\n print('Upload complete')\n print('************************************')",
"def copy_files(self):\n if settings.USE_S3_STORAGE:\n self.copy_to_s3()\n else:\n self.copy_to_local()",
"def collect_assets(systems, settings):\r\n for sys in systems:\r\n sh(django_cmd(sys, settings, \"collectstatic --noinput > /dev/null\"))",
"def _upload(self, errors):\n if self.backup_bucket is None:\n return\n\n try:\n with open(\"%s/%s.tar.gz\"%(self.backup_path, self.name), 'r+') as f:\n s3upload.upload_to_s3(f,\n self.backup_bucket,\n \"%s/%s.tar.gz\"%(self.backup_id, self.name))\n\n # Cleaning up resources, since the upload was successful\n run(\"rm -f %s/%s.tar.gz\"%(self.backup_path, self.name))\n except Exception as e:\n logging.exception(e)\n errors.put(Exception(\"Error uploading %s server backup to S3\" % self.name))\n traceback.print_exc()",
"def upload(env):\n if not env:\n click.echo(\"Environment must be specified\")\n click.Abort()\n\n with open(\"zappa_settings.json\", \"r\") as f:\n settings = json.load(f)\n\n if not settings:\n click.echo(\"Settings not loaded\")\n click.Abort()\n return\n\n try:\n s3_bucket = settings[env][\"s3_bucket\"]\n aws_region = settings[env][\"aws_region\"]\n except AttributeError:\n click.echo(\"Failed to get details from settings\")\n click.Abort()\n return\n\n session = boto3.Session()\n credentials = session.get_credentials()\n current_credentials = credentials.get_frozen_credentials()\n\n app.config[\"FLASKS3_FORCE_MIMETYPE\"] = True\n\n try:\n css_assets.build()\n\n flask_s3.create_all(\n app,\n user=current_credentials.access_key,\n password=current_credentials.secret_key,\n bucket_name=s3_bucket,\n location=aws_region,\n put_bucket_acl=False,\n )\n click.echo(\n f\"Uploaded assets to Bucket https://{s3_bucket}.s3.{aws_region}.amazonaws.com\"\n )\n except Exception as e:\n click.echo(f\"Failed to upload assets: {e}\")",
"def uploadFilestoS3(self):\n allfilesuploadedcount = 0\n for eachfiledic in self.fileTobeUploaded:\n if eachfiledic[\"uploadedSuccess\"] == 0: #Means this file never got uploaded.\n if os.path.getsize(eachfiledic[\"filepath\"]) < 1000000000: #<1GB\n s3Log.info (\"FileSize < 1GB for :{}, so using single part upload.\".format(eachfiledic[\"filepath\"]) )\n if self.singlePartUpload(eachfiledic) == True:\n eachfiledic[\"uploadedSuccess\"] = 1\n allfilesuploadedcount = allfilesuploadedcount + 1\n else:\n s3Log.info (\"FileSize > 1GB for :{}, so using Multi Part upload. \\n\".format(eachfiledic[\"filepath\"]) )\n if self.multiPartUpload(eachfiledic) == True:\n eachfiledic[\"uploadedSuccess\"] = 1\n allfilesuploadedcount = allfilesuploadedcount + 1\n\n\n elif eachfiledic[\"uploadedSuccess\"] == 1: #Means it got uploaded in the last run.\n allfilesuploadedcount = allfilesuploadedcount + 1\n\n self.saveStateOfThisRun()\n if len(self.fileTobeUploaded) == allfilesuploadedcount: #Means we uploaded all files in the queue\n return True\n else:\n return False",
"def ensure_static_exists():\n for entry in html_static_path:\n static_path = os.path.join(__repo_docs__, entry)\n if not os.path.isdir(static_path):\n os.makedirs(static_path)",
"def upload_artifacts(ctx: Context, salt_version: str, artifacts_path: pathlib.Path):\n ctx.info(\"Preparing upload ...\")\n s3 = boto3.client(\"s3\")\n to_delete_paths: list[dict[str, str]] = []\n remote_path = f\"release-artifacts/{salt_version}\"\n try:\n ret = s3.list_objects(\n Bucket=tools.utils.STAGING_BUCKET_NAME,\n Prefix=remote_path,\n )\n if \"Contents\" in ret:\n objects = []\n for entry in ret[\"Contents\"]:\n if entry[\"Key\"].endswith(\".release-backup-done\"):\n continue\n objects.append({\"Key\": entry[\"Key\"]})\n to_delete_paths.extend(objects)\n except ClientError as exc:\n if \"Error\" not in exc.response:\n raise\n if exc.response[\"Error\"][\"Code\"] != \"404\":\n raise\n\n if to_delete_paths:\n with tools.utils.create_progress_bar() as progress:\n bucket_uri = f\"s3://{tools.utils.STAGING_BUCKET_NAME}/{remote_path}\"\n task = progress.add_task(f\"Deleting '{bucket_uri}'\", total=1)\n try:\n ret = s3.delete_objects(\n Bucket=tools.utils.STAGING_BUCKET_NAME,\n Delete={\"Objects\": objects},\n )\n except ClientError:\n log.exception(f\"Failed to delete '{bucket_uri}'\")\n finally:\n progress.update(task, advance=1)\n\n ctx.info(\"Uploading release artifacts ...\")\n to_upload_paths: list[pathlib.Path] = []\n copy_exclusions = [\n \".json\",\n ]\n for fpath in artifacts_path.iterdir():\n if fpath.suffix in copy_exclusions:\n continue\n to_upload_paths.append(fpath)\n\n try:\n for fpath in to_upload_paths:\n upload_path = f\"{remote_path}/{fpath.name}\"\n size = fpath.stat().st_size\n ctx.info(f\" {upload_path}\")\n with tools.utils.create_progress_bar(file_progress=True) as progress:\n task = progress.add_task(description=\"Uploading...\", total=size)\n s3.upload_file(\n str(fpath),\n tools.utils.STAGING_BUCKET_NAME,\n upload_path,\n Callback=tools.utils.UpdateProgress(progress, task),\n )\n except KeyboardInterrupt:\n pass",
"def _process_task_log(self):\n directory = self._executor.log_dir\n if os.path.exists(directory):\n for root, _dirs, files in os.walk(directory):\n for name in files:\n filepath = os.path.join(root, name)\n object_name = str(self._task.project_id) + \"/\" + self._task.node_id + \"/log/\" + name\n if not self._s3.client.upload_file(self._s3.bucket, object_name, filepath):\n log.error(\"Error uploading file to S3\")",
"def create_buckets(self):\n\n # 1. Create bucket\n for name in [BUCKET_1_SRC, BUCKET_1_DST, BUCKET_2_SRC, BUCKET_2_DST, BUCKET_3_SRC, BUCKET_3_DST]:\n self.create_gcs_bucket(name)\n\n # 2. Prepare parents\n first_parent = f\"gs://{BUCKET_1_SRC}/parent-1.bin\"\n second_parent = f\"gs://{BUCKET_1_SRC}/parent-2.bin\"\n\n self.execute_with_ctx(\n [\n \"bash\",\n \"-c\",\n f\"cat /dev/urandom | head -c $((1 * 1024 * 1024)) | gsutil cp - {first_parent}\",\n ],\n key=GCP_GCS_KEY,\n )\n\n self.execute_with_ctx(\n [\n \"bash\",\n \"-c\",\n f\"cat /dev/urandom | head -c $((1 * 1024 * 1024)) | gsutil cp - {second_parent}\",\n ],\n key=GCP_GCS_KEY,\n )\n\n self.upload_to_gcs(first_parent, f\"gs://{BUCKET_1_SRC}/file.bin\")\n self.upload_to_gcs(first_parent, f\"gs://{BUCKET_1_SRC}/subdir/file.bin\")\n self.upload_to_gcs(first_parent, f\"gs://{BUCKET_2_SRC}/file.bin\")\n self.upload_to_gcs(first_parent, f\"gs://{BUCKET_2_SRC}/subdir/file.bin\")\n self.upload_to_gcs(second_parent, f\"gs://{BUCKET_2_DST}/file.bin\")\n self.upload_to_gcs(second_parent, f\"gs://{BUCKET_2_DST}/subdir/file.bin\")\n self.upload_to_gcs(second_parent, f\"gs://{BUCKET_3_DST}/file.bin\")\n self.upload_to_gcs(second_parent, f\"gs://{BUCKET_3_DST}/subdir/file.bin\")\n\n self.delete_gcs_bucket(first_parent)\n self.delete_gcs_bucket(second_parent)",
"def dispatch(self, request, *args, **kwargs):\n try:\n self.copy_static_resources()\n except MissingStaticRoot:\n self.template_name = 'general_error.html'\n kwargs['error'] = _('There is no STATIC_ROOT defined in the settings file')\n return super().dispatch(request, *args, **kwargs)\n except Exception as e:\n self.template_name = 'general_error.html'\n kwargs['error'] = str(e)\n return super().dispatch(request, *args, **kwargs)\n cms_pages = Page.objects.filter(publication_date__isnull=False)\n for page in cms_pages:\n languages = page.get_languages()\n for language in languages:\n url = page.get_public_url(language)\n if url not in self.done:\n self.done.append(url)\n static_page_path = '{}{}index.html'.format(STORAGE_PATH, url)\n fetch_url = \"{}{}\".format(self.SOURCE_DOMAIN, url)\n response = requests.get(fetch_url)\n make_dir(url)\n with open(static_page_path, 'w') as file:\n file.write(response.text)\n return super().dispatch(request, *args, **kwargs)",
"def deploy_static_media(env=None, asset_version='', quick=False, haus_vars={}):\n print green('Deploying static media {}'.format('__quick__' if quick else ''))\n collectstatic(no_input=True, skip_admin=quick)",
"def copy_images(repositories, static_dir):\n for repository in repositories:\n if repository.has_key('branch'):\n branch = repository['branch']\n else:\n branch = retrieve_current_branch(repository_directory=os.curdir, fix_environment=True)\n dir = fetch_repository(repository['url'], workdir=os.curdir, branch=branch)\n package_static_dir = os.path.join(dir, repository['package_name'], 'static')\n if os.path.exists(package_static_dir):\n copytree(package_static_dir, os.path.join(static_dir, repository['package_name']))",
"def upload(jsonfiles):\n # clear S3 Bucket\n bucket = S3Bucket()\n bucket.clear()\n for jsonfile in jsonfiles:\n filename = os.path.basename(jsonfile)\n key = build_key(filename)\n logging.info(\"%s %s\", filename, key)\n # store json in S3 object\n bucket.store(key, jsonfile)"
] |
[
"0.6210542",
"0.61488205",
"0.5865301",
"0.585198",
"0.58491004",
"0.58321655",
"0.5650139",
"0.5592172",
"0.5504334",
"0.54761356",
"0.5457472",
"0.5440076",
"0.54384625",
"0.5431056",
"0.5406941",
"0.5380988",
"0.5366491",
"0.53493273",
"0.5324381",
"0.53202146",
"0.5297198",
"0.52804065",
"0.5241121",
"0.5181118",
"0.5133253",
"0.51217544",
"0.5097527",
"0.50975126",
"0.50973344",
"0.50950176"
] |
0.81745136
|
0
|
Returns a |random value| <= SHIFT_MAX_VAL
|
def get_shift() -> int:
    # random.randint(a, b) takes positional bounds (there are no low/high keywords)
    # and is inclusive on both ends, so the result satisfies |value| <= SHIFT_MAX_VAL
    return random.randint(-SHIFT_MAX_VAL, SHIFT_MAX_VAL)
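As a sanity check, a minimal usage sketch (SHIFT_MAX_VAL is assumed to be a module-level constant; the value 5 is only illustrative, not from the original):

import random

SHIFT_MAX_VAL = 5  # illustrative; the original module defines its own constant

def get_shift() -> int:
    return random.randint(-SHIFT_MAX_VAL, SHIFT_MAX_VAL)

# Every sample stays within [-SHIFT_MAX_VAL, SHIFT_MAX_VAL], matching |value| <= SHIFT_MAX_VAL
assert all(abs(get_shift()) <= SHIFT_MAX_VAL for _ in range(10000))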
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_next_random(value, max_value, min_value, max_delta):\n # Determine if sensor delta should be added or substracted.\n if value == max_value:\n add = False\n elif value == min_value:\n add = True\n else:\n add = random.random() > 0.5\n\n # Calculate a new delta.\n delta = random.randint(0, max_delta)\n\n # Apply the delta.\n if add:\n value += delta\n else:\n value -= delta\n if value > max_value:\n value = max_value\n elif value < min_value:\n value = min_value\n\n return value",
"def test_generator_downward(narrow_power_range):\n with patch('random.randint', side_effect=lambda a,b: -1):\n range_min, range_max = narrow_power_range\n for msg in it.islice(generate_msgs(range_min, range_max), 0, 5):\n pass\n power = Message.parse(msg).power\n assert power == range_min",
"def randbelow_from_randbits(self, n):\n k = int(n-1).bit_length()\n r = self.getrandbits(k) # 0 <= r < 2**k\n while int(r) >= n:\n r = self.getrandbits(k)\n return int(r)",
"def randint(maxvalue):\n\n bit_size = common.bit_size(maxvalue)\n\n tries = 0\n while True:\n value = read_random_int(bit_size)\n if value <= maxvalue:\n break\n\n if tries % 10 == 0 and tries:\n # After a lot of tries to get the right number of bits but still\n # smaller than maxvalue, decrease the number of bits by 1. That'll\n # dramatically increase the chances to get a large enough number.\n bit_size -= 1\n tries += 1\n\n return value",
"def rand_val(max):\n order = math.ceil(math.log10(max)) #Determine the num of digits in size\n index = math.floor(random.SystemRandom().random() * (10 ** order))\n\n # Yea, this is quite inefficient\n while (index >= max):\n index = math.floor(random.SystemRandom().random() * (10 ** order))\n\n return index",
"def test_in_range_0_1():\n g = RG.larger_random()\n assert 0 <= next(g) <= 1",
"def get_number(maxValue):\r\n return random.randint(1, maxValue)",
"def test_generator_continuous():\n RANGE_MAX = 100\n prev_value = RANGE_MAX // 2\n for msg in it.islice(generate_msgs(0, RANGE_MAX), 0, 42):\n curr_value = Message.parse(msg).power\n assert curr_value - prev_value <= 1\n prev_value = curr_value",
"def randomShiftVector(values, smin, smax):\n\tshift = np.random.uniform(smin, smax)\n\treturn list(map(lambda va: va + shift, values))",
"def binary_blow_wind():\n s = random.random()\n return s < 0.05",
"def randInt(max):\n return int(max * random.random())",
"def get_random_integer():\n return random.randint(-MAX_GENERATED_NUMBER_RANGE, MAX_GENERATED_NUMBER_RANGE)",
"def _random_max_wrap(*args):\n _, opt_pt = random_maximise(*args)\n return opt_pt",
"def test_generator_upward(narrow_power_range):\n with patch('random.randint', side_effect=lambda a,b: 1):\n range_min, range_max = narrow_power_range\n for msg in it.islice(generate_msgs(range_min, range_max), 0, 5):\n pass\n power = Message.parse(msg).power\n assert power == range_max",
"def maybe(self):\n return random.getrandbits(1)",
"def get_offset(limit=12):\n return random.randrange(0, limit)",
"def fix_rand_value(lo_bound: float, up_bound: float) -> float:\n # In this patch test function for determinism, just return lower bound\n nonlocal _i, _vals_sequence\n v_return = _vals_sequence[_i]\n _i = (_i + 1) % len(_vals_sequence)\n return v_return",
"def _bitsfor(maxval):\n maxvalbits = int(round(math.log(maxval) / math.log(2)))\n if maxval != (1 << maxvalbits):\n raise ValueError(\"maxval must be a power of 2, not %d\" % maxval)\n return maxvalbits",
"def random_shift(x, fraction):\n min_x, max_x = np.min(x), np.max(x)\n m = np.random.uniform(-fraction, fraction, size=x.shape) + 1\n return np.clip(x * m, min_x, max_x)",
"def roll(self):\n return randint(1,6)",
"def test_always_larger():\n g = RG.larger_random()\n first = next(g)\n second = next(g)\n assert second > first",
"def _get_random_value(self):\r\n return random.randint(1, 10)",
"def create_random_index(self, max:int):\n return random.randint(0, max - 1)",
"def random_int(max=1000):\r\n return randint(0, max)",
"def sample(self):\n L = e ** (-self.lamb)\n k, p = 1, rand()\n while p > L:\n k += 1\n p *= rand()\n return k - 1",
"def seed_random(max_integer):\n return random.randrange(0,max_integer);",
"def random_pitch_shift(\n spectrogram: tf.Tensor, shift_min: float = -1.0, shift_max: float = 1.0, **kwargs\n) -> tf.Tensor:\n semitone_shift = (\n tf.random_uniform(shape=(1,), seed=0) * (shift_max - shift_min) + shift_min\n )\n return pitch_shift(spectrogram, semitone_shift=semitone_shift, **kwargs)",
"def pull(self):\n chance = np.random.uniform()\n return chance < self.winning_prob",
"def constrain(value):\n size = 2**m\n return (value%size)",
"def _limit_fill():\n z = random.randint(0, 10)\n if z/10.0 < LIMIT_FILL_PROBABILITY:\n return True\n else:\n return False"
] |
[
"0.6203564",
"0.60628116",
"0.600135",
"0.59833807",
"0.59038484",
"0.58453417",
"0.58328253",
"0.5776378",
"0.5762465",
"0.575023",
"0.5747984",
"0.57101095",
"0.57073265",
"0.57069814",
"0.5686693",
"0.5646487",
"0.5634794",
"0.5613866",
"0.5598931",
"0.559132",
"0.55743605",
"0.5573085",
"0.55629975",
"0.55526876",
"0.55520004",
"0.55499864",
"0.55307555",
"0.55276936",
"0.54975766",
"0.5492536"
] |
0.7717284
|
0
|
load all of imagenet data as flat vector
|
def load_imagenet(directory):
    path_train, path_val = directory + '/ILSVRC2012_img_train', directory + '/ILSVRC2012_img_val'
    # Training split: one sub-directory per class label
    class_dirs = os.listdir(path_train)
    train_data = []
    train_labels = []
    for label in class_dirs:
        imgs_path = os.path.join(path_train, label)
        imgs = os.listdir(imgs_path)
        for img_name in imgs:
            img_path = os.path.join(imgs_path, img_name)
            img = cv2.imread(img_path)
            b, g, r = cv2.split(img)  # OpenCV loads BGR; reorder to RGB
            img = cv2.merge([r, g, b]).reshape(-1, 64, 64, 3)
            train_data.append(img)
            train_labels.append(label)
    train_data = np.concatenate(train_data)
    train_labels = np.array(train_labels, dtype='str')

    # Validation split, used here as the test set
    class_dirs = os.listdir(path_val)
    test_data = []
    test_labels = []
    for label in class_dirs:
        imgs_path = os.path.join(path_val, label)
        imgs = os.listdir(imgs_path)
        for img_name in imgs:
            img_path = os.path.join(imgs_path, img_name)
            img = cv2.imread(img_path)
            b, g, r = cv2.split(img)
            img = cv2.merge([r, g, b]).reshape(-1, 64, 64, 3)
            test_data.append(img)
            test_labels.append(label)
    test_data = np.concatenate(test_data)
    test_labels = np.array(test_labels, dtype='str')

    # Map string class names to integer indices
    _, train_labels = np.unique(train_labels, return_inverse=True)
    _, test_labels = np.unique(test_labels, return_inverse=True)

    del r, g, b, imgs_path, img_name, img, imgs
    return train_data, train_labels, test_data, test_labels
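The query asks for flat vectors, while the loader keeps each image as a 64x64x3 array; a minimal follow-up sketch that flattens every image into a 1-D vector (the directory path is a placeholder, not from the original):

train_data, train_labels, test_data, test_labels = load_imagenet('/data/imagenet')  # placeholder path
train_flat = train_data.reshape(train_data.shape[0], -1)  # each row is a 64*64*3 = 12288-element vector
test_flat = test_data.reshape(test_data.shape[0], -1)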
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_imagenet_data(net):\r\n\r\n # get a list of all the images (note that we use networks trained on ImageNet data)\r\n img_list = os.listdir(path_data)\r\n\r\n # throw away files that are not in the allowed format (png or jpg)\r\n for img_file in img_list[:]:\r\n if not (img_file.endswith(\".png\") or img_file.endswith(\".jpg\")):\r\n img_list.remove(img_file)\r\n \r\n # fill up data matrix\r\n img_dim = net.crop_dims\r\n X = np.empty((0, img_dim[0], img_dim[1], 3))\r\n X_filenames = []\r\n for i in range(len(img_list)):\r\n np_img = np.float32(PIL.Image.open('{}/{}'.format(path_data, img_list[i])))\r\n if np_img.shape[0] >= img_dim[0] and np_img.shape[1] >= img_dim[1]:\r\n o = 0.5*np.array([np_img.shape[0]-img_dim[0], np_img.shape[1]-img_dim[1]])\r\n X = np.vstack((X, np_img[o[0]:o[0]+img_dim[0], o[1]:o[1]+img_dim[1], :][np.newaxis]))\r\n X_filenames.append(img_list[i].replace(\".\",\"\"))\r\n else:\r\n print(\"Skipped \",img_list[i],\", image dimensions were too small.\")\r\n\r\n # the number of images we found in the folder\r\n num_imgs = X.shape[0]\r\n\r\n # cast to image values that can be displayed directly with plt.imshow()\r\n X_im = np.uint8(X)\r\n \r\n # preprocess\r\n X_pre = np.zeros((X.shape[0], 3, img_dim[0], img_dim[1]))\r\n for i in range(num_imgs):\r\n X_pre[i] = net.transformer.preprocess('data', X[i])\r\n X = X_pre\r\n \r\n return X, X_im, X_filenames",
"def load_data(path,size, scale = True):\n images = os.listdir(path)\n images.sort()\n\n X = []\n for i, img in enumerate(images):\n photo = plt.imread(os.path.join(path,img))\n if size:\n photo = tf.image.resize(photo, (size, size))\n X.append(photo)\n \n X = np.array(X)\n if scale:\n X = X/X.max() \n return X",
"def _get_data(path):\n archive = np.load(path)\n images = archive['faceData']\n return images",
"def _load_data(self, imagepath):\n im = cv2.imread(imagepath)\n self.net.blobs['data'].data[...] = self.transformer.preprocess('data', im)",
"def main(image_directory, cuda=False):\n\n BATCH_SIZE = 256\n\n if cuda:\n model = torch.nn.Sequential(*list(models.resnet18(pretrained=True).children())[:-1]).cuda()\n else:\n model = torch.nn.Sequential(*list(models.resnet18(pretrained=True).children())[:-1])\n model.eval()\n\n all_vectors = []\n\n dataset = Dataset(image_directory)\n loader = torch.utils.data.DataLoader(dataset,\n batch_size=BATCH_SIZE,\n collate_fn=collate,\n num_workers=4)\n\n for inputs, meta in tqdm(loader):\n if cuda:\n inputs = Variable(inputs.cuda())\n else:\n inputs = Variable(inputs)\n\n vectors = model(inputs).cpu().data.numpy()\n meta = map(lambda x: (x[0],\n x[1],\n int(re.search('image_(\\d+).jpg',\n x[2]).group(1))),\n meta)\n print(meta)\n print(vectors)\n all_vectors.append(\n np.concatenate(\n [np.array(meta), vectors.squeeze()],\n axis=1\n )\n )\n\n all_vectors = np.concatenate(all_vectors)\n np.save('vectors.npy', all_vectors)",
"def load_data():\n\n training_files_dir = \"digits/trainingDigits\"\n training_files = os.listdir(training_files_dir)\n file_num = len(training_files)\n hw_labels = []\n\n training_mat = zeros((file_num, 32 * 32))\n for i in xrange(file_num):\n filename = training_files[i]\n file_label = int((filename.split(\".\")[0]).split(\"_\")[0])\n hw_labels.append(file_label)\n training_mat[i, :] = img2vector(training_files_dir + '/' + filename)\n\n return training_mat, hw_labels",
"def get_raw_data():\n\twith open('train_label.pkl', 'rb') as f:\n\t\ttrain_label = pickle.load(f)\n\n\twith open('train_image.pkl', 'rb') as f:\n\t\ttrain_data = pickle.load(f)\n\n\tprint(np.unique(np.asarray(train_label)))\n\n\treturn (train_label, np.asarray(train_data))",
"def load_one_img(ds):\n for img in ds.take(1):\n img = img[1, ...]\n yuv_image_tensor = tf.expand_dims(img, axis=0)\n\n return yuv_image_tensor",
"def load_tiny_imagenet(directory):\n path_train, path_val, path_test = directory + '/train', directory + '/val', directory + '/test'\n labels = os.listdir(path_train)\n train_data = []\n train_labels = []\n for label in labels:\n imgs_path = os.path.join(path_train, label, 'images')\n imgs = os.listdir(imgs_path)\n for img_name in imgs:\n img_path = os.path.join(imgs_path, img_name)\n img = cv2.imread(img_path)\n b, g, r = cv2.split(img)\n img = cv2.merge([r,g,b]).reshape(-1, 64, 64, 3)\n train_data.append(img)\n train_labels.append(label)\n train_data = np.concatenate(train_data)\n train_labels = np.array(train_labels, dtype='str')\n \n test_data = []\n test_labels = []\n with open(path_val+'/val_annotations.txt', 'r') as f:\n val_annotations = [line.strip().split('\\t') for line in f]\n val_annotations = np.array(val_annotations)\n imgs_path = os.path.join(path_val, 'images')\n imgs = os.listdir(imgs_path)\n for img_name in imgs:\n img_path = os.path.join(imgs_path, img_name)\n img = cv2.imread(img_path)\n b, g, r = cv2.split(img)\n img = cv2.merge([r,g,b]).reshape(-1, 64, 64, 3)\n test_data.append(img)\n label = val_annotations[val_annotations[:, 0] == img_name, 1].astype('U9')\n test_labels.append(label)\n test_data = np.concatenate(test_data)\n test_labels = np.concatenate(test_labels)\n test_labels = np.array(test_labels, dtype='str')\n \n _, train_labels = np.unique(train_labels, return_inverse=True)\n _, test_labels = np.unique(test_labels, return_inverse=True)\n \n del r, g, b, label, labels, imgs_path, img_name, img, imgs, val_annotations\n \n return train_data, train_labels, test_data, test_labels",
"def load_image_data():\n print(\"Loading image data...\")\n label_dict = get_label_vectors()\n categories = [c for c in os.listdir('images/') if c[0] != '.'] # ignore\n labels = [] # instantiate list for image labels\n data = [] # instantiate list for image data\n for i in categories:\n path = 'images/{}/'.format(i) # define path to category folder\n for j in os.listdir(path): # get images from category folder\n labels.append(label_dict[i]) # append label vector\n data.append(cv2.imread(path + j).flatten()) # append flattened image data\n\n labels = np.array(labels) # convert lists to array\n data = np.array(data)\n print(\"Done.\")\n\n return labels, data",
"def load_mnist(path, kind='train'):\n labels_path = os.path.join(path,'%s-labels-idx1-ubyte.gz'% kind)\n\n images_path = os.path.join(path,'%s-images-idx3-ubyte.gz'% kind)\n\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8,offset=8)\n\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8,offset=16).reshape(len(labels), 784)\n\n print(\"Dataset Loaded\")\n \n return images, labels",
"def flatten(file_name):\n dataset = pickle.load(open(file_name, 'rb'))\n train_data = dataset['train']\n test_data = dataset['test']\n\n train_data = [y for x in train_data for y in x]\n test_data = [y for x in test_data for y in x]\n\n train_data=generate_binary_vectors(train_data,False)\n test_data=generate_binary_vectors(test_data,False)\n\n return train_data, test_data",
"def _read_datafile(self,path):\n \tlabels, images = [], []\n \twith gzip.GzipFile(path) as f:\n \t for line in f:\n \t vals = line.strip().split()\n \t labels.append(float(vals[0]))\n \t images.append([float(val) for val in vals[1:]])\n \tlabels = np.array(labels, dtype=np.int32)\n \tlabels[labels == 10] = 0 # fix weird 0 labels\n \timages = np.array(images, dtype=np.float32).reshape(-1, 16, 16, 1)\n \timages = (images + 1) / 2\n \treturn images, labels",
"def main():\n labels, data = load_image_data()\n print(labels.shape, data.shape)",
"def load_data(self) -> tuple:\n label_num = {}\n data_set = pathlib.Path(self.path)\n data = []\n\n # create the label lookup dict for verifcation later\n for i, v in enumerate(data_set.iterdir()):\n label_num[v.name] = i\n self.labels[i] = v.name\n # end\n\n # read images\n for img_path in data_set.rglob(\"*.jpg\"):\n lbl = label_num[str(img_path.parent.stem)]\n img = cv2.imread(str(img_path))\n img = cv2.resize(img, self.dims, interpolation=cv2.INTER_AREA)\n\n # flatten RGB data into a vector\n # NOTE: NOT ACTUALLY NECESSARY! \n img.flatten()\n\n # label the sample and append to temp data list\n sample = np.append(lbl, img)\n data.append(sample)\n # end\n\n # partition and package the data (*_ ensures safe unpacking)\n train, test, validate, *_ = Data.partition(data, self.parts, 0.7, 0.2)\n self.train = Data(train)\n self.test = Data(test)\n self.validate = Data(validate)",
"def load_data(class_fnames):\n X = []\n y = []\n for label, fnames in enumerate(class_fnames):\n for fname in fnames:\n X.append(cv2.imread(fname))\n y.append(label)\n X = np.stack(X)\n y = np.stack(y)\n return X, y",
"def load_data():\n\n # Load data\n # You can create this Numpy datafile by running the create_validation_sample.py script\n df = h5py.File(data_fn, \"r\")\n imgs_validation = df[\"imgs_validation\"]\n msks_validation = df[\"msks_validation\"]\n img_indicies = range(len(imgs_validation))\n\n \"\"\"\n OpenVINO uses channels first tensors (NCHW).\n TensorFlow usually does channels last (NHWC).\n So we need to transpose the axes.\n \"\"\"\n input_data = imgs_validation\n msks_data = msks_validation\n return input_data, msks_data, img_indicies",
"def loadData(path):\r\n X = []\r\n y = []\r\n dir1 = os.listdir(path)\r\n for d1 in dir1:\r\n dir2 = os.listdir(path+'/'+d1)\r\n for d2 in dir2:\r\n if int(d1) == 0:\r\n image = cv2.imread(path+r'/'+d1+r'/'+d2, 0)\r\n X.append(np.array(image, dtype=np.float32).reshape(-1) / 255.0)\r\n y.append(1)\r\n elif int(d1) == 1:\r\n image = cv2.imread(path+r'/'+d1+r'/'+d2, 0)\r\n X.append(np.array(image, dtype=np.float32).reshape(-1) / 255.0)\r\n y.append(-1)\r\n X = np.array(X, dtype=np.float32)\r\n y = np.array(y, dtype=np.int64)\r\n perm = np.random.permutation(X.shape[0])\r\n X = X[perm]\r\n y = y[perm]\r\n return X, y",
"def get_data(folder):\n X = []\n y = []\n\n for seismic_type in os.listdir(folder):\n if not seismic_type.startswith('.'):\n if seismic_type in ['Class1']:\n label = '0'\n else:\n label = '1'\n for image_filename in os.listdir(folder + seismic_type):\n img_file = cv2.imread(folder + seismic_type + '/' + image_filename)\n if img_file is not None:\n # Downsample the image to 120, 160, 3\n #img_file = scipy.misc.imresize(arr=img_file, size=(120, 160, 3))\n img_arr = np.asarray(img_file)\n # img_arr = image.img_to_array(img_arr)\n X.append(img_arr)\n y.append(label)\n X = np.asarray(X)\n y = np.asarray(y)\n return X,y",
"def loadData(image, mask, im_shape):\r\n X, y = [], []\r\n\r\n img = transform.resize(image, im_shape, mode='constant')\r\n img = np.expand_dims(img, -1)\r\n mask = transform.resize(mask, im_shape, mode='constant')\r\n mask = np.expand_dims(mask, -1)\r\n X.append(img)\r\n y.append(mask)\r\n X = np.array(X)\r\n y = np.array(y)\r\n X -= X.mean()\r\n X /= X.std()\r\n\r\n return X, y",
"def load_dataset(path_test, width, height):\n tot_images = 0\n for label in listdir(path_test):\n label_full = join(path_test, label)\n for img_name in listdir(label_full):\n tot_images += 1\n\n # allocate the memory\n # THE DTYPE is float, should be the right one\n all_images = np.zeros((tot_images, width, height, 3))\n\n true_labels = []\n num_images = 0\n for label in listdir(path_test):\n label_full = join(path_test, label)\n for img_name in listdir(label_full):\n # for img_name in listdir(label_full)[:10]:\n img_name_full = join(label_full, img_name)\n print(f\"Opening {img_name_full} {width}\")\n\n image = cv2.imread(img_name_full)\n\n image = cv2.resize(image, (width, height))\n\n # scale the pixel values to [0, 1]\n image = image.astype(\"float\") / 255.0\n\n all_images[num_images, :, :, :] = image\n\n num_images += 1\n true_labels.append(label)\n\n print(f\"All_images.shape {all_images.shape}\")\n\n # cv2.imshow('Resized all_images[0]', all_images[0])\n # cv2.waitKey(0)\n\n return all_images, true_labels",
"def load(data, feature):\n #Settings\n train_path = os.path.join(\"data\", data, feature) #put your image path here if you want to override current directory\n\n X = []\n y = []\n for f in os.listdir(train_path):\n (X_i, y_i) = cPickle.load(open(os.path.join(train_path,f), \"rb\"))\n if type(X_i) is np.ndarray:\n X_i = X_i.tolist()\n X = X + X_i #Append the two lists together\n y = y + y_i\n assert np.size(X,0) == 50000 or np.size(X,0) == 10000\n assert np.size(y) == 50000 or np.size(y) == 10000\n # Raws are stored as SimpleCV Images so they can easily be converted to\n # features using SimpleCV\n # Since machine learning aglorithms take feature vectors as inputs, we\n # flatten the underlying 3D matrices of the images here.\n if feature == \"raw\":\n X = map (lambda img: img.getNumpy().flatten(), X)\n return X,y",
"def view_image(train_dataloader):\n for (x, target) in train_dataloader:\n np.save(\"img.npy\", x)\n print(x.shape)\n exit(0)",
"def load_batch(filename: str) -> Tuple[ndarray, ndarray, ndarray]:\n dataDict = unpickle(filename)\n print(\"1\", dataDict[b\"data\"][1, :])\n X = (dataDict[b\"data\"] / 255).T\n print(\"2\", X[:, 1])\n y = np.array(dataDict[b\"labels\"])\n Y = np.eye(10)[y].T\n return X, Y, y",
"def load_from_array():\n\n x = np.load(settings.data(\"x.npy\")).reshape(-1, 1, 224, 224)\n y = np.load(settings.data(\"y.npy\"))\n\n return x, y",
"def read_batch(self):\n imgs = []\n labels = []\n idx = np.random.choice(self.nImgs,self.batch_size)\n \tfor i in idx:\n imgs.append(cv2.imread(self.data_files[i]))\n \t labels.append(cv2.imread(self.label_files[i]))\n \timgs,labels = np.array(imgs),np.array(labels)\n imgs = (imgs - self.mean)/self.stddev\n \tlabels = (labels - self.mean)/self.stddev\n return imgs,labels",
"def load_data_pkl(self):\n pkl_name = '{}/data/mini-imagenet-cache-{}.pkl'.format(self.root_dir, self.split)\n print('Loading pkl dataset: {} '.format(pkl_name))\n\n try:\n with open(pkl_name, \"rb\") as f:\n data = pkl.load(f, encoding='bytes')\n image_data = data[b'image_data']\n class_dict = data[b'class_dict']\n except:\n with open(pkl_name, \"rb\") as f:\n data = pkl.load(f)\n image_data = data['image_data']\n class_dict = data['class_dict']\n\n print(data.keys(), image_data.shape, class_dict.keys())\n data_classes = sorted(class_dict.keys()) # sorted to keep the order\n\n n_classes = len(data_classes)\n print('n_classes:{}, n_label:{}, n_unlabel:{}'.format(n_classes,self.n_label,self.n_unlabel))\n dataset_l = np.zeros([n_classes, self.n_label, self.im_height, self.im_width, self.channels], dtype=np.float32)\n if self.n_unlabel>0:\n dataset_u = np.zeros([n_classes, self.n_unlabel, self.im_height, self.im_width, self.channels], dtype=np.float32)\n else:\n dataset_u = []\n\n for i, cls in enumerate(data_classes):\n idxs = class_dict[cls] \n np.random.RandomState(self.seed).shuffle(idxs) # fix the seed to keep label,unlabel fixed\n dataset_l[i] = image_data[idxs[0:self.n_label]]\n if self.n_unlabel>0:\n dataset_u[i] = image_data[idxs[self.n_label:]]\n print('labeled data:', np.shape(dataset_l))\n print('unlabeled data:', np.shape(dataset_u))\n \n self.dataset_l = dataset_l\n self.dataset_u = dataset_u\n self.n_classes = n_classes\n\n del image_data",
"def get_data(path):\n all_images_as_array=[]\n label=[]\n for filename in os.listdir(path):\n try:\n if re.match(r'positive',filename):\n label.append(1)\n else:\n label.append(0)\n img=cv2.imread(path + filename)\n (b, g, r)=cv2.split(img)\n img=cv2.merge([r,g,b])\n np_array = np.asarray(img)\n l,b,c = np_array.shape\n np_array = np_array.reshape(l*b*c,)\n all_images_as_array.append(np_array)\n except:\n continue\n return np.array(all_images_as_array), np.array(label)",
"def load_data():\n dirname = os.path.join('datasets', 'fashion-mnist')\n base = 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/'\n files = [\n 'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz',\n 't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz'\n ]\n\n paths = []\n for fname in files:\n paths.append(get_file(fname, origin=base + fname, cache_subdir=dirname))\n\n with gzip.open(paths[0], 'rb') as lbpath:\n y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8)\n\n with gzip.open(paths[1], 'rb') as imgpath:\n x_train = np.frombuffer(\n imgpath.read(), np.uint8, offset=16).reshape(len(y_train), 28, 28)\n\n with gzip.open(paths[2], 'rb') as lbpath:\n y_test = np.frombuffer(lbpath.read(), np.uint8, offset=8)\n\n with gzip.open(paths[3], 'rb') as imgpath:\n x_test = np.frombuffer(\n imgpath.read(), np.uint8, offset=16).reshape(len(y_test), 28, 28)\n\n return (x_train, y_train), (x_test, y_test)",
"def load_images(filename='training_images'): \n file_path = os.path.join(DATA_DIR, filename)\n with open(file_path, 'rb') as f:\n b = f.read() # hope ya get it all\n\n # grab the first four numbers ...\n # fmt='>i' means big-endian int32\n magic, n_images, n_rows, n_cols = (struct.unpack('>i', b[i*4:(i+1)*4]) for i in range(4))\n\n # i am a god-fearing man\n assert magic[0] == 2051, \"bad magic number, what do?\"\n\n\n # so i think you can use the standard libary's \"array\" for this, just\n # because binary data of any sort is kinda dodgy, but this grabs 'the rest'\n # format='B' means unsigned char === 'uint8', and apparently endianness doesn't matter\n image_stream = array.array('B', b[16:])\n\n # so each 28*28 byte portion of image_stream is a flattened image. these two\n # numpy.reshape calls get it into the desired shape for A. maybe could\n # combine it into one call, idk. anyway, each flattened image appears as a\n # row, and there is a row for each image.\n image_first = numpy.reshape(image_stream, (n_images[0], n_rows[0], n_cols[0]))\n images = image_first.reshape(n_images[0], n_rows[0]*n_cols[0])\n\n # convert to float in [0,1]\n images = images.astype('f') / 255\n\n return images"
] |
[
"0.6885546",
"0.6619148",
"0.6589122",
"0.6569712",
"0.65382355",
"0.6508243",
"0.6401413",
"0.6387084",
"0.63791114",
"0.6374463",
"0.6297042",
"0.6268717",
"0.6249637",
"0.6221783",
"0.61718935",
"0.61413294",
"0.6102105",
"0.6062337",
"0.6045171",
"0.60450935",
"0.6041338",
"0.6030839",
"0.6005366",
"0.5979027",
"0.5977899",
"0.5972258",
"0.59704506",
"0.59702134",
"0.59635496",
"0.59613544"
] |
0.6851224
|
1
|
Take as input a Keras ImageGen (Iterator) and generate random crops from the image batches generated by the original iterator.
|
def random_crop_generator(batches, crop_length):
while True:
batch_x, batch_y = next(batches)
batch_crops = np.zeros((batch_x.shape[0], crop_length, crop_length, 3))
for i in range(batch_x.shape[0]):
batch_crops[i] = random_crop(batch_x[i], (crop_length, crop_length))
yield (batch_crops, batch_y)
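The generator relies on a `random_crop` helper that is not included in this row; a minimal sketch of such a helper, under the assumption that each image is an H x W x C NumPy array at least as large as the requested crop:

import numpy as np

def random_crop(img, crop_size):
    # img: H x W x C array; crop_size: (crop_height, crop_width)
    crop_h, crop_w = crop_size
    h, w = img.shape[0], img.shape[1]
    top = np.random.randint(0, h - crop_h + 1)
    left = np.random.randint(0, w - crop_w + 1)
    return img[top:top + crop_h, left:left + crop_w, :]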
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def crop_generator(batches, crop_length):\n while True:\n batch_x, batch_y = next(batches)\n batch_crops = np.zeros((batch_x.shape[0], crop_length, crop_length, 3))\n for i in range(batch_x.shape[0]):\n batch_crops[i] = random_crop(batch_x[i], (crop_length, crop_length))\n yield (batch_crops, batch_y)",
"def flow(self, batch_size=32, output='both', crops=0):\n while True:\n for dataset in self.input_sets:\n X = self.training_set['input/'+dataset]\n y = self.training_set['target/'+dataset]\n y_seg = self.training_set['seg_map/'+dataset]\n\n for i in range(int(math.ceil(X.shape[0]/2000))):\n index = list(range(0,X.shape[0]))\n sample = random.sample(index, batch_size)\n sample.sort()\n X_batch = X[sample, ...]\n y_batch = y[sample, ...]\n y_seg_batch = y_seg[sample, ...]\n X_batch = self.augment(X_batch)\n\n if crops > 0:\n (X_batch, y_batch,\n y_seg_batch) = _augmentors.random_crops(\n X_batch, y_batch, y_seg_batch, n_crops=crops, crop_dim=20)\n\n if output=='both':\n yield (X_batch, [y_batch, y_seg_batch])\n elif output=='seg':\n yield (X_batch, y_seg)\n elif output=='density':\n yield (X_batch, y_batch)\n else:\n raise Exception('output must be \"density\", \"seg\" or \"both\"')",
"def patchGenerator(gen, patch_size=128, patch_batch_size=1):\n \n for imgs, masks in gen: # For each batch\n img_list = []\n mask_list = []\n for i in range(0, imgs.shape[0]): # For each image in a batch\n patch_x = patchify(imgs[i], (patch_size, patch_size, imgs[i].shape[-1]), step=patch_size) # split image into 4*4 small 128*128 patches.\n img_p = patch_x.reshape(-1, *patch_x.shape[-3:])\n img_list.append(img_p)\n\n mask_y = patchify(masks[i], (patch_size, patch_size, 1), step=patch_size) # split mask into 4*4 small 128*128 patches.\n mask_p = mask_y.reshape(-1, *mask_y.shape[-3:])\n mask_list.append(mask_p)\n \n if (patch_batch_size == 1):\n for j in range(0, img_p.shape[0]): # For each patch in a image\n yield img_p[j][np.newaxis, :], mask_p[j][np.newaxis, :]\n \n if (patch_batch_size > 1):\n image_patches = np.concatenate(img_list)\n mask_patches = np.concatenate(mask_list)\n patch_batch_counter = 0\n for idx in range(0, patch_batch_size):\n image_patch_batch = image_patches[patch_batch_counter:patch_batch_counter + patch_batch_size]\n mask_patch_batch = mask_patches[patch_batch_counter:patch_batch_counter + patch_batch_size]\n shuffled_images, shuffled_masks = randomize(image_patch_batch, mask_patch_batch)\n yield shuffled_images, shuffled_masks",
"def train_batches_with_generated_images(gen, x_train_input, X_train, disc_batch_size):\n # output of gen is input of disc\n image_input_batch = x_train_input[np.random.randint(0, X_train.shape[0], size=disc_batch_size), :, :, :]\n noise_X = np.random.uniform(0, 1, size=[disc_batch_size, 100])\n generated_images = gen.predict([image_input_batch, noise_X])\n train_batches(0)",
"def generator(samples, batch_size=32):\n num_samples = len(samples)\n while 1: # Loop forever so the generator never terminates\n sklearn.utils.shuffle(samples)\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n images = []\n angles = []\n for batch_sample in batch_samples:\n # append center image\n name = 'Sample_data/IMG/'+batch_sample[0].split('/')[-1]\n center_image = cv2.imread(name)\n center_angle = float(batch_sample[3])\n images.append(center_image)\n angles.append(center_angle)\n correction = 0.30 # shift angle commands\n # append left camera image\n left_angle = center_angle + correction\n lname = 'Sample_data/IMG/'+batch_sample[1].split('/')[-1]\n left_image = cv2.imread(lname)\n images.append(left_image)\n angles.append(left_angle)\n \n # append right camera image\n right_angle = center_angle + correction\n rname = 'Sample_data/IMG/'+batch_sample[1].split('/')[-1]\n right_image = cv2.imread(rname)\n images.append(right_image)\n angles.append(right_angle)\n\n # flip image to augment data\n Nsample = len(angles)\n for i in range(len(angles)):\n images.append(np.fliplr(images[i]))\n angles.append(-angles[i])\n\n # trim image to only see section with road\n X_train = np.array(images)\n y_train = np.array(angles)\n yield sklearn.utils.shuffle(X_train, y_train)",
"def trainingBatchGenerator(data_folder, image_shape):\n def get_batches_fn(batch_size):\n \"\"\"\n Create batches of training data\n :param batch_size: Batch Size\n :return: Batches of training data\n \"\"\"\n image_paths = glob(os.path.join(data_folder, 'image_2', '*.png'))\n label_paths = {\n re.sub(r'_(lane|road)_', '_', os.path.basename(path)): path\n for path in glob(os.path.join(data_folder, 'gt_image_2', '*_road_*.png'))}\n background_color = np.array([255, 0, 0])\n\n random.shuffle(image_paths)\n for batch_i in range(0, len(image_paths), batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i+batch_size]:\n gt_image_file = label_paths[os.path.basename(image_file)]\n\n image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n gt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)\n\n gt_bg = np.all(gt_image == background_color, axis=2)\n gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n\n images.append(image)\n gt_images.append(gt_image)\n\n yield np.array(images), np.array(gt_images)\n return get_batches_fn",
"def batch_generator(samples, batch_size=32, is_training=True):\n num_samples = len(samples)\n while True: # Loop forever so the generator never terminates\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples.iloc[offset:offset+batch_size]\n\n images = []\n angles = []\n for batch_sample in batch_samples.iterrows():\n batch_sample = batch_sample[1]\n name = DATA_PATH + '/IMG/'+batch_sample['center'].split('/')[-1]\n center_image = cv2.imread(name)\n center_image = cv2.cvtColor(center_image, cv2.COLOR_BGR2RGB)\n center_angle = float(batch_sample['steering'])\n images.append(center_image)\n angles.append(np.clip(center_angle,-1,1))\n if is_training:\n # Center Flip\n images.append(cv2.flip(center_image,1))\n angles.append(np.clip(center_angle*-1.0,-1,1))\n # Left\n name = DATA_PATH + '/IMG/'+batch_sample['left'].split('/')[-1]\n correction = 0.2\n center_image = cv2.imread(name)\n center_image = cv2.cvtColor(center_image, cv2.COLOR_BGR2RGB)\n images.append(center_image)\n angles.append(np.clip(center_angle+correction,-1,1))\n # Left Flip\n images.append(cv2.flip(center_image,1))\n angles.append(np.clip((center_angle+correction)*-1.0,-1,1))\n # Right\n name = DATA_PATH + '/IMG/'+batch_sample['right'].split('/')[-1]\n correction = -0.2\n center_image = cv2.imread(name)\n center_image = cv2.cvtColor(center_image, cv2.COLOR_BGR2RGB)\n images.append(center_image)\n angles.append(np.clip(center_angle+correction,-1,1))\n # Right Flip\n images.append(cv2.flip(center_image,1))\n angles.append(np.clip((center_angle+correction)*-1.0,-1,1))\n \n X_train = np.array(images)\n y_train = np.array(angles)\n yield shuffle(X_train, y_train)",
"def my_generator(batch_size, img_dir):\n cat_dirs = glob.glob(img_dir + \"/*\")\n counter = 0\n while True:\n input_images = np.zeros(\n (batch_size, config.height, config.width, 3 * 5))\n output_images = np.zeros((batch_size, config.height, config.width, 3))\n random.shuffle(cat_dirs)\n if (counter+batch_size >= len(cat_dirs)):\n counter = 0\n for i in range(batch_size):\n input_imgs = glob.glob(cat_dirs[counter + i] + \"/cat_[0-4]*\") \n imgs = [Image.open(img) for img in sorted(input_imgs)]\n input_images[i] = np.concatenate(imgs, axis=2)\n output_imgs = glob.glob(cat_dirs[counter + i] + \"/cat_[5-7]*\")\n imgs = [Image.open(img) for img in sorted(output_imgs)]\n output_images[i] = np.concatenate(imgs, axis=1)\n input_images[i] /= 255.\n output_images[i] /= 255.\n yield (input_images, output_images)\n counter += batch_size",
"def generator(samples, batch_size=32, is_training=True):\n num_samples = len(samples)\n\n #vertical, horizontal range for random translation\n x_translate_range = 100\n y_translate_range = 10\n\n while 1: # Loop forever so the generator never terminates\n #shuffle the samples once the whole data is processed into batches\n shuffle(samples)\n #split data into batches\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n images = []\n angles = []\n for batch_sample in batch_samples:\n # corrections for centered view image, left camera view image and right camera view image\n corrections = [0,0.2,-0.2]\n # iterate over center, right and left camera view images\n for i in range(3):\n current_path = get_image_path(batch_sample[i])\n\n # read image\n image = cv2.imread(current_path)\n # append image for training/validation\n images.append(preprocess(image))\n\n # calculate angle measurement with applied angle corrections\n measurement = float(batch_sample[3]) + corrections[i]\n angles.append(measurement)\n\n # insert flipped image for opposite direction generalization\n images.append(preprocess(cv2.flip(image, 1)))\n angles.append(measurement*-1.0)\n\n # create random augmented image only for training\n if is_training:\n image, measurement = flip_image(image, measurement, flip_probability=0.5)\n image = add_salt_pepper_noise(image)\n image, measurement = random_translate(image, measurement, x_translate_range, y_translate_range)\n image = random_shadow(image)\n image = random_brightness(image)\n images.append(preprocess(image))\n angles.append(measurement)\n\n # create X, y dataset\n X_train = np.array(images)\n y_train = np.array(angles)\n\n yield sklearn.utils.shuffle(X_train, y_train)",
"def image_generator_not_random(list_of_files, crop_size=320, scale=1):\n while True:\n text_region = []\n for jpgname in list_of_files:\n print jpgname\n # jpgname = np.random.choice(list_of_files)\n img = cv2.imread(jpgname)\n pattern = re.compile('jpg')\n txtname = pattern.sub('txt', jpgname)\n if not os.path.isfile(txtname):\n continue\n cropped_image = img\n with open(txtname, 'r') as f:\n for line in f:\n line_split = line.strip().split(',')\n print line_split\n # clockwise\n (x1, y1, x2, y2) = line_split[0:4]\n (x3, y3, x4, y4) = line_split[4:8]\n text_region.append([string.atof(x1), string.atof(y1), string.atof(x2), string.atof(y2),\n string.atof(x3), string.atof(y3), string.atof(x4), string.atof(y4)])\n if cropped_image is None or text_region is None or \\\n cropped_image.shape[0] != crop_size or cropped_image.shape[1] != crop_size:\n continue\n yield [scale * cropped_image, text_region]",
"def generator(samples, batch_size=32):\n num_samples = len(samples)\n while 1: # Loop forever so the generator never terminates\n samples = sklearn.utils.shuffle(samples)\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n\n images = []\n angles = []\n for batch_sample in batch_samples:\n #Because the file path in two folders are different, a if-else is needed.\n if len(batch_sample[0].split('/')) == 2:\n name = './data/IMG/'+batch_sample[0].split('/')[-1]\n else:\n name =batch_sample[0]\n originalImage = cv2.imread(name)\n image = cv2.cvtColor(originalImage, cv2.COLOR_BGR2RGB)\n images.append(image)\n measurement = float(line[3])\n angles.append(measurement)\n \n # Flipping\n images.append(cv2.flip(image,1))\n angles.append(measurement*(-1.0))\n\n # trim image to only see section with road\n inputs = np.array(images)\n outputs = np.array(angles)\n yield sklearn.utils.shuffle(inputs, outputs)",
"def generator(array, batch_size):\n start = 0 # pointer to where we are in iteration\n while True:\n stop = start + batch_size\n diff = stop - array.shape[0]\n if diff <= 0:\n batch = array[start:stop]\n start += batch_size\n else:\n batch = np.concatenate((array[start:], array[:diff]))\n start = diff\n batch = batch.astype(np.float32) / 255.0 # normalize pixel intensities\n batch = np.random.binomial(1, batch) # binarize images\n yield batch",
"def generator(samples, batch_size=32):\n num_samples = len(samples)\n while 1: # Loop forever so the generator never terminates\n shuffle(samples)\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n\n images = []\n angles = []\n for batch_sample in batch_samples:\n path, angle, flip = batch_sample\n image = cv2.imread(path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # convert to RGB\n images.append(flip_image(image) if flip else image)\n angles.append(angle)\n\n # trim image to only see section with road\n X_train = np.array(images)\n y_train = np.array(angles)\n yield sklearn.utils.shuffle(X_train, y_train)",
"def gen_batch_function(data_folder, image_shape, seed=None, samples_limit=None):\n # Grab image and label paths\n image_paths = glob(os.path.join(data_folder, 'image_2', '*.png'))\n label_paths = {\n re.sub(r'_(lane|road)_', '_', os.path.basename(path)): path\n for path in glob(os.path.join(data_folder, 'gt_image_2', '*_road_*.png'))\n }\n background_color = np.array([255, 0, 0])\n\n if samples_limit:\n image_paths = image_paths[0:samples_limit]\n\n samples_n = len(image_paths)\n\n rnd = random.Random(seed)\n\n def get_batches_fn(batch_size):\n \"\"\"\n\t\tCreate batches of training data\n\t\t:param batch_size: Batch Size\n\t\t:return: Batches of training data\n\t\t\"\"\"\n # Shuffle training data\n rnd.shuffle(image_paths)\n # Loop through batches and grab images, yielding each batch\n for batch_i in range(0, samples_n, batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i + batch_size]:\n gt_image_file = label_paths[os.path.basename(image_file)]\n # Re-size to image_shape\n image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n gt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)\n\n # Create \"one-hot-like\" labels by class\n gt_bg = np.all(gt_image == background_color, axis=2)\n gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n\n images.append(image)\n gt_images.append(gt_image)\n\n yield np.array(images), np.array(gt_images)\n\n return get_batches_fn, samples_n",
"def batch_generator(batch_size):\n # Randomly shuffle the order of the files in directory\n files = glob.glob(os.path.join(data_dir, pattern))\n np.random.shuffle(files)\n n_files = len(files)\n\n for batch_num in range(0, n_files, batch_size):\n batch = []\n\n for img_file in files[batch_num:batch_num+batch_size]:\n # Load image from file\n img = scipy.misc.imread(img_file)\n\n # -----------\n # BOOKMARK: File preprocessing steps here\n # -----------\n img = scipy.misc.imresize(img, img_shape)\n # -----------\n\n # Append to the batch\n batch.append(img)\n\n # Yield the current batch\n yield np.array(images)",
"def batch_generator(batch_size):\n\n # Infinite loop.\n while True:\n # Get a list of random indices for images in the training-set.\n idx = np.random.randint(100,size=batch_size)\n \n # Get the pre-computed transfer-values for those images.\n # These are the outputs of the pre-trained image-model.\n transf_values = np.array([transfer_values[_] for _ in idx])\n\n # For each of the randomly chosen images there are\n # at least 5 captions describing the contents of the image.\n # Select one of those captions at random and get the\n # associated sequence of integer-tokens.\n tokens = [caps_markedwords[_] for _ in idx]\n\n # Count the number of tokens in all these token-sequences.\n num_tokens = [len(t) for t in tokens]\n \n # Max number of tokens.\n max_tokens = np.max(num_tokens)\n # Pad all the other token-sequences with zeros\n # so they all have the same length and can be\n # input to the neural network as a numpy array.\n tokens_padded = pad_sequences(tokens,\n maxlen=max_tokens,\n padding='post',\n truncating='post')\n \n # Further prepare the token-sequences.\n # The decoder-part of the neural network\n # will try to map the token-sequences to\n # themselves shifted one time-step.\n decoder_input_data = tokens_padded[:, 0:-1]\n decoder_output_data = tokens_padded[:, 1:]\n\n # Dict for the input-data. Because we have\n # several inputs, we use a named dict to\n # ensure that the data is assigned correctly.\n x_data = \\\n {\n 'decoder_input': decoder_input_data,\n 'transfer_values_input': transf_values\n }\n\n\n # Dict for the output-data.\n y_data = \\\n {\n 'decoder_output': decoder_output_data\n }\n \n yield (x_data, y_data)",
"def test_random_crop(dummy_input):\n # Test the 2D image: H, W, C\n image, label = dummy_input(image_size=(512, 512, 3),\n label_size=(512, 512, 1))\n transform = RandomCrop(size=(64, 64))\n _image, _label = transform(image, label)\n assert _image.shape == (64, 64, image.shape[2])\n assert _label.shape == (64, 64, label.shape[2])\n\n # Test the 3D image: H, W, D, C\n image, label = dummy_input(image_size=(512, 512, 20, 3),\n label_size=(512, 512, 20, 1))\n transform = RandomCrop(size=(64, 64, 8))\n _image, _label = transform(image, label)\n assert _image.shape == (64, 64, 8, image.shape[3])\n assert _label.shape == (64, 64, 8, label.shape[3])",
"def image_generator(df,batch_size,plab,augment=True):\n rng = np.random.RandomState(290615)\n if_train = 1 if plab < 1. else 0\n bi,b_list = 0,df.groupby('business_id').apply(get_biz_id,if_train,batch_size)\n b_list = b_list[b_list!=0]\n b_order = rng.permutation(b_list.index)\n pi,p_list = 0, df[df.iloc[:,-1]==0]['photo_id']\n p_order = rng.permutation(p_list.index)\n while True:\n if rng.rand(1)[0] < plab:\n # aggregate biz_id with outdoor-seating\n biz_id_i = b_list.ix[b_order[bi]]\n photo_train = df[df['business_id']==biz_id_i]['photo_id']\n y_batch = np.asarray(df[df['business_id']==biz_id_i].iloc[:,-1])\n # increase/loop indices for next iteration\n if bi < len(b_list)-1:\n bi += 1\n else:\n bi,b_order = 0,rng.permutation(b_list.index)\n else:\n # pic 32 random non-outdoor-seating pictures\n photo_train = p_list[p_order[pi:(pi+batch_size)]]\n y_batch = np.repeat(0, repeats=len(photo_train), axis=0)\n # increase/loop indices for next iteration\n if pi < len(p_list)-1-batch_size:\n pi += batch_size\n else:\n pi,p_order = 0,rng.permutation(p_list.index)\n batch_size_i = len(photo_train)\n # read and augment photos\n X_batch = np.empty((batch_size_i,h,w,ch))\n for i_ in range(batch_size_i):\n f_ = 'data/train_photos/' + str(photo_train.iloc[i_]) + '.jpg'\n im = Image.open(os.path.realpath(f_))\n im_sml = im.resize((w,h))\n # scale inputs [-1,+1]\n xi = np.asarray(im_sml)/128.-1\n if augment:\n # flip coords horizontally (but not vertically)\n if rng.rand(1)[0] > 0.5:\n xi = np.fliplr(xi)\n # rescale slightly within a random range\n jit = w*0.2\n if rng.rand(1)[0] > 0.1:\n xl,xr = rng.uniform(0,jit,1),rng.uniform(w-jit,w,1)\n yu,yd = rng.uniform(0,jit,1),rng.uniform(h-jit,h,1)\n pts1 = np.float32([[xl,yu],[xr,yu],[xl,yd],[xr,yd]])\n pts2 = np.float32([[0,0],[w,0],[0,h],[w,h]])\n M = cv2.getPerspectiveTransform(pts1,pts2)\n xi = cv2.warpPerspective(xi,M,(w,h))\n # save individual image to X_batch\n X_batch[i_,:,:,:] = xi\n# plt.imsave('data/aug_%i' % i_,(xi+1)/2);plt.close()\n yield([X_batch],y_batch)",
"def two_step_generator(classes: list, paths_list: list, imgs_per_class: int, shape: tuple,\n nb_win: int, greys: bool, nb_to_gen: int, img_gen: ImageDataGenerator) -> list:\n \n datawin = list() \n datagen = list()\n \n for class_ in classes:\n print(class_)\n \n # Images paths list\n class_imgs_path = [paths_list[k] for k in range(len(paths_list)) if class_ in paths_list[k]]\n\n # Randomly choose images\n class_imgs_subset = np.random.choice(class_imgs_path, size=imgs_per_class, replace=False)\n\n # Get images\n class_imgs = get_imgs(class_imgs_subset)\n\n # Step 1: resize and crop on sliding windows\n class_new_imgs = create_windows_imgs(class_imgs, shape=shape, nb_win=nb_win, greys=greys)\n class_new_imgs = np.array(flat_list(class_new_imgs))\n datawin.append(class_new_imgs)\n \n # Step 2: DataGenerator\n class_datagen = datagen_class(class_new_imgs, nb_to_gen, img_gen)\n class_datagen = class_datagen.astype(int)\n\n datagen.append(class_datagen)\n \n return datawin, datagen",
"def generator(lines, batch_size=32, augment=False):\n\n num_samples = len(lines)\n while 1: # Loop generator indefinitely\n shuffle(lines) # Shuffle data between epochs\n for offset in range(0, num_samples, batch_size):\n batch_samples = lines[offset: offset + batch_size]\n\n images = []\n steer_angles = []\n\n for line in batch_samples:\n image = ndimage.imread(line[0])\n steer_angle = line[1]\n\n # Apply data augmentation as necessary\n if augment:\n image, steer_angle = random_horizontal_flip(\n image, steer_angle)\n # image, steer_angle = random_all(image, steer_angle)\n # image = random_shadows(image)\n # image = random_gaussian(image)\n\n images.append(image)\n steer_angles.append(steer_angle)\n\n # Convert lists to numpy arrays for use with Keras\n X_data = np.array(images)\n y_data = np.array(steer_angles)\n\n yield shuffle(X_data, y_data)",
"def my_generator(batch_size, img_dir):\n\timage_filenames = glob.glob(img_dir + \"/*\")\n\tcounter = 0\n\twhile True:\n\t\tbw_images = np.zeros((batch_size, config.width, config.height))\n\t\tcolor_images = np.zeros((batch_size, config.width, config.height, 3))\n\t\trandom.shuffle(image_filenames) \n\t\tif ((counter+1)*batch_size>=len(image_filenames)):\n\t\t\t counter = 0\n\t\tfor i in range(batch_size):\n\t\t\t img = Image.open(image_filenames[counter + i]).resize((config.width, config.height))\n\t\t\t color_images[i] = np.array(img)\n\t\t\t bw_images[i] = np.array(img.convert('L'))\n\t\tyield (bw_images, color_images)\n\t\tcounter += batch_size",
"def batch_generator(data, batch_size):\r\n data = np.array(data)\r\n n_batches = int(np.ceil(len(data) / float(batch_size)))\r\n \r\n idx = np.random.permutation(len(data))\r\n data_shuffled = data[idx]\r\n \r\n for i in range(n_batches):\r\n start = i * batch_size\r\n end = start + batch_size\r\n\r\n batch = data_shuffled[start:end]\r\n if len(batch) < batch_size:\r\n # Pad with zeros \r\n pad = np.zeros((batch_size - batch.shape[0], batch.shape[1]),\r\n dtype=batch.dtype)\r\n batch = np.vstack((batch, pad))\r\n\r\n yield batch",
"def test_generator(self, test_path):\n\n img_list = os.scandir(test_path)\n for img_entry in img_list:\n\n img = cv2.imread(img_entry.path, COLOR_TO_OPENCV[self.color_mode])\n if img.shape[-1] == 3:\n orig_shape = img.shape[-2::-1]\n else:\n orig_shape = img.shape[::-1]\n\n\n img = cv2.resize(img, tuple(self.target_size))\n img = img / 255\n if self.color_mode == \"grayscale\":\n img = np.reshape(img, img.shape + (1,))\n img = np.reshape(img, (1,) + img.shape)\n yield img, img_entry, orig_shape",
"def get_generators(patch_size, batch_size, preprocess_func, output_reshape_func, num_validation, train_processes,\n train_cache, train_data_dir='data/train/'):\n\n dirs = util.get_data_list(train_data_dir)\n labels = util.parse_labels_months()\n train_paths, validation_paths = util.train_validation_split(dirs, labels)\n # generate train batch loader\n train_data_loader = CTBatchLoader(train_paths, batch_size, patch_size, num_threads_in_multithreaded=1,\n preprocess_func=preprocess_func)\n\n train_transforms = get_train_transform(patch_size)\n train_data_generator = MultiThreadedAugmenter(train_data_loader, train_transforms, num_processes=train_processes,\n num_cached_per_queue=train_cache, seeds=None, pin_memory=False)\n\n # wrapper to be compatible with keras\n train_generator_keras = KerasGenerator(train_data_generator, output_reshapefunc=output_reshape_func)\n\n # generate validation batch loader\n valid_data_loader = CTBatchLoader(validation_paths, num_validation, patch_size,\n num_threads_in_multithreaded=1, preprocess_func=preprocess_func)\n valid_transforms = get_valid_transform(patch_size)\n valid_data_generator = MultiThreadedAugmenter(valid_data_loader, valid_transforms, num_processes=1,\n num_cached_per_queue=1, seeds=None, pin_memory=False)\n # wrapper to be compatible with keras\n valid_generator_keras = KerasGenerator(valid_data_generator, output_reshape_func, 1)\n\n return train_generator_keras, valid_generator_keras",
"def _generate_crop_images(\n crop_boxes, image, points_grid, layer_idxs, target_size, original_size, input_data_format=None\n):\n cropped_images = []\n total_points_per_crop = []\n for i, crop_box in enumerate(crop_boxes):\n left, top, right, bottom = crop_box\n\n channel_dim = infer_channel_dimension_format(image, input_data_format)\n if channel_dim == ChannelDimension.LAST:\n cropped_im = image[top:bottom, left:right, :]\n else:\n cropped_im = image[:, top:bottom, left:right]\n\n cropped_images.append(cropped_im)\n\n cropped_im_size = get_image_size(cropped_im, channel_dim)\n points_scale = np.array(cropped_im_size)[None, ::-1]\n\n points = points_grid[layer_idxs[i]] * points_scale\n normalized_points = _normalize_coordinates(target_size, points, original_size)\n total_points_per_crop.append(normalized_points)\n\n return cropped_images, total_points_per_crop",
"def _get_next_minibatch(self):\n images = np.zeros((self._batch_size, 3, self._crop_h, self._crop_w), dtype=np.float32)\n masks = np.zeros((self._batch_size, 1, self._crop_h, self._crop_w), dtype=np.float32)\n\n shuffled_batch = np.arange(self._batch_size)\n np.random.shuffle(shuffled_batch)\n for batch_index in shuffled_batch:\n blob_queue = self._blob_queue.get()\n images[batch_index, :, :, :] = blob_queue[0]\n masks[batch_index, :, :, :] = blob_queue[1]\n\n return [images, masks]",
"def __call__(self, batch_size=20, shuffle=True, augment=True):\r\n\r\n if batch_size < 1:\r\n raise ValueError(\"batch_size must be more than 1.\")\r\n if shuffle:\r\n self.shuffle()\r\n\r\n for start in range(0, self.length, batch_size):\r\n batch = self.perm(start, start+batch_size)\r\n if augment:\r\n assert self._augmenter is not None, \"you have to set an augmenter.\"\r\n yield self._augmenter.augment_dataset(batch, method=[ia.ImageAugmenter.NONE, ia.ImageAugmenter.FLIP])\r\n else:\r\n yield batch",
"def gen_batches_functions(data_folder, image_paths, image_shape, out_shape,\n label_folder):\n\n def get_batches_fn(batch_size):\n \"\"\"\n Create batches of training data\n :param batch_size: Batch Size\n :return: Batches of training data\n \"\"\"\n id_road = 7\n id_lane = 6\n id_car = 10\n\n for batch_i in range(0, len(image_paths), batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i + batch_size]:\n # Get corresponding label img path\n gt_image_file = image_file.replace('CameraRGB', 'CameraSeg')\n # Read rgb and label images\n img_in = scipy.misc.imread(image_file, mode='RGB')\n gt_in = scipy.misc.imread(gt_image_file)\n # Crop sky part of the image\n image = img_in[-out_shape[0]:, :]\n gt_image = gt_in[-out_shape[0]:, :, 0]\n # Obtain labels\n gt_road = ((gt_image == id_road) | (gt_image == id_lane))\n gt_car = (gt_image == id_car)\n gt_car[-105:, :] = False\n gt_bg = np.invert(gt_car | gt_road)\n # Augmentation\n if bool(random.getrandbits(1)):\n image, gt_bg, gt_car, gt_road = flip_img(\n image, gt_bg, gt_car, gt_road)\n\n gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n gt_car = gt_car.reshape(*gt_car.shape, 1)\n gt_road = gt_road.reshape(*gt_road.shape, 1)\n\n gt_image = np.concatenate((gt_bg, gt_car, gt_road), axis=2)\n\n images.append(image)\n gt_images.append(gt_image)\n\n yield np.array(images), np.array(gt_images)\n\n return get_batches_fn",
"def generate_batch(X_train, y_train, batch_size=64):\r\n images = np.zeros((batch_size, 66, 200, 3), dtype=np.float32)\r\n angles = np.zeros((batch_size,), dtype=np.float32)\r\n while True:\r\n straight_count = 0\r\n for i in range(batch_size):\r\n # Select a random index to use for data sample\r\n sample_index = random.randrange(len(X_train))\r\n image_index = random.randrange(len(X_train[0]))\r\n angle = y_train[sample_index][image_index]\r\n # Limit angles of less than absolute value of .1 to no more than 1/2 of data\r\n # to reduce bias of car driving straight\r\n if abs(angle) < .1:\r\n straight_count += 1\r\n if straight_count > (batch_size * .5):\r\n while abs(y_train[sample_index][image_index]) < .1:\r\n sample_index = random.randrange(len(X_train))\r\n # Read image in from directory, process, and convert to numpy array\r\n image = cv2.imread('data/' + str(X_train[sample_index][image_index]))\r\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\r\n image = process_image(image)\r\n image = np.array(image, dtype=np.float32)\r\n # Flip image and apply opposite angle 50% of the time\r\n if random.randrange(2) == 1:\r\n image = cv2.flip(image, 1)\r\n angle = -angle\r\n images[i] = image\r\n angles[i] = angle\r\n yield images, angles",
"def get_batches(dirname,\n gen=keras.preprocessing.image.ImageDataGenerator(),\n shuffle=True,\n batch_size=1,\n target_size=(224, 224),\n class_mode=\"categorical\"):\n return gen.flow_from_directory(dirname,\n shuffle=shuffle,\n batch_size=batch_size,\n target_size=target_size,\n class_mode=class_mode)"
] |
[
"0.75824314",
"0.6719259",
"0.63412005",
"0.6340442",
"0.6336577",
"0.6271357",
"0.62369394",
"0.61656475",
"0.6124723",
"0.61019236",
"0.6051693",
"0.6036598",
"0.59852403",
"0.5972355",
"0.596388",
"0.5952421",
"0.5929974",
"0.59115386",
"0.5901182",
"0.5863039",
"0.5820809",
"0.5819543",
"0.5813546",
"0.5812051",
"0.5808443",
"0.580115",
"0.57827634",
"0.5773646",
"0.57719606",
"0.5764916"
] |
0.74894965
|
1
|
To be used in conjunction with loss.binary_xentropy_with_sigmoid
|
def sigmoid_with_binary_xentropy(z):
return sigmoid(z)
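
This wrapper only applies the activation; the binary cross-entropy named in the query is computed elsewhere. Below is a minimal sketch of how it might be exercised, assuming a NumPy `sigmoid` like the ones among the negatives and with the wrapper above in scope; the example logits are chosen here for illustration.

import numpy as np

def sigmoid(x):
    # standard logistic function
    return 1.0 / (1.0 + np.exp(-x))

# Hypothetical usage: map raw logits to probabilities before a binary cross-entropy loss.
logits = np.array([-2.0, 0.0, 3.0])
probs = sigmoid_with_binary_xentropy(logits)
print(probs)   # approximately [0.119, 0.5, 0.953]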
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def sigmoid(x):\r\n #pred_x = (np.exp(x) - np.exp(-x)) / (np.exp(x) + np.exp(-x))\r\n pred_x = 1.0 / (1.0 + np.exp(-x))\r\n return pred_x\r\n pass",
"def test_sigmoid_cross_entropy(self):\n loss_op = pointwise_losses.SigmoidCrossEntropy()\n\n y_pred = loss_op.final_activation_op({\n \"logits\": self.logits,\n \"metadata\": {\n \"mask\": self.mask\n }\n })\n assert np.isclose(y_pred[0][0].numpy(), 0.54905695, atol=1e-5)\n assert np.isclose(y_pred[2][4].numpy(), 0., atol=1e-5)\n\n loss = loss_op({\"mask\": self.mask}, self.y_true, y_pred)\n assert np.isclose(loss, 0.6905699, atol=1e-5)",
"def _sigmoid(x):\n return 1 / (1 + np.exp(-x))",
"def sigmoid_activation(x):\n return 1.0 / (1 + np.exp(-x))",
"def sigmoid(x):\n return 1 / (1 + exp(-x))",
"def sigmoid(x):\n return 1 / (1 + exp(-x))",
"def sigmoid(x):\n return 1 / (1 + np.exp(-x))",
"def sigmoid(x):\n return 1 / (1 + np.exp(-x))",
"def sigmoid(x):\n return 1 / (1 + np.exp(-x))",
"def sigmoid(x):\n return 1 / (1 + np.exp(-x))",
"def sigmoid(x):\n return 1 / (1 + np.exp(-x))",
"def sigmoid(x):\n return 1 / (1 + np.exp(-x))",
"def sigmoid(x):\r\n return 1 / (1 + np.exp(-x))",
"def sigmoid(x):\n return 1/(1+np.exp(-1*x))",
"def sigmoid(x):\n return 1 / (1 * np.exp(-x))",
"def sigmoid(x):\n return 1. / (1. + np.exp(-x))",
"def sigmoid(x):\n return 1.0/(1 + np.exp(-x))",
"def sigmoid(X):\n return 1 / (1 + np.exp(-X))",
"def sigmoid(X):\n return 1 / (1 + np.exp(-X))",
"def sigmoid(x):\n return 1.0 / (1.0 + np.exp(-x))",
"def sigmoid(x):\n return 1 / (1 + math.exp(-x))",
"def sigmoid(x):\n return 1.0/(1.0+exp(-x))",
"def sigmoid(x):\n\treturn 1 / (1 + m.exp(-x))",
"def act_sigmoid_scaled(x):\n return tf.nn.sigmoid(x) * tf.math.log(max_sales) * 1.2",
"def sigmoid(x):\r\n\r\n return 1 / (1 + np.exp(-x))",
"def sigmoid(x):\n return 1 / (1 + math.exp(-x))",
"def sigmoid(x):\n\n return 1 / (1 + math.exp(-x))",
"def sigmoid(x):\n\n return 1 / (1 + math.exp(-x))",
"def sigmoid(self, x):\n self.x = x\n output = 1 / (1 + np.exp(-x))\n return output",
"def sigmoid(x):\n return 1/(1 + math.exp(-x))"
] |
[
"0.7591269",
"0.75822216",
"0.7455182",
"0.74292374",
"0.74232256",
"0.74232256",
"0.74083984",
"0.74083984",
"0.74083984",
"0.74083984",
"0.74083984",
"0.74083984",
"0.7393072",
"0.73908216",
"0.73857445",
"0.73740834",
"0.73655087",
"0.73640925",
"0.73640925",
"0.73588455",
"0.7335531",
"0.73263943",
"0.7313945",
"0.7307331",
"0.7304229",
"0.7286178",
"0.727832",
"0.727832",
"0.7261115",
"0.72601986"
] |
0.8279197
|
0
|
To be used in conjunction with loss.xentropy_with_softmax
|
def softmax_with_xentropy(z):
return softmax(z)
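
As with the sigmoid wrapper, this function defers to a `softmax` defined elsewhere. A numerically stable NumPy version (an assumption, consistent with several of the negatives below) is sketched here together with a hypothetical call to the wrapper above.

import numpy as np

def softmax(x):
    # subtract the row-wise max before exponentiating for numerical stability
    shifted = np.exp(x - np.max(x, axis=-1, keepdims=True))
    return shifted / np.sum(shifted, axis=-1, keepdims=True)

logits = np.array([[1.0, 2.0, 3.0]])
probs = softmax_with_xentropy(logits)   # wrapper defined above
print(probs.sum(axis=-1))               # each row sums to 1.0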
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def loss(params: hk.Params, batch, label) -> jnp.ndarray:\r\n logits = net.apply(params, batch)\r\n labels = jax.nn.one_hot(label, n_classes)\r\n\r\n # Cross Entropy Loss\r\n softmax_xent = -jnp.sum(labels * jax.nn.log_softmax(logits))\r\n softmax_xent /= labels.shape[0]\r\n return softmax_xent",
"def test_softmax_cross_entropy(self):\n loss_op = listwise_losses.SoftmaxCrossEntropy()\n\n y_pred = loss_op.final_activation_op({\n \"logits\": self.logits,\n \"metadata\": {\n \"mask\": self.mask\n }\n })\n assert np.isclose(y_pred[0][0].numpy(), 0.19868991, atol=1e-5)\n assert np.isclose(y_pred[2][4].numpy(), 0.0, atol=1e-5)\n\n loss = loss_op({\"mask\": self.mask}, self.y_true, y_pred)\n assert np.isclose(loss, 1.306335, atol=1e-5)",
"def xentropy_loss(self, logits, labels):\n labels = tf.cast(labels, tf.int32)\n logits = tf.reshape(logits, [tf.shape(logits)[0], -1, self.num_classes])\n labels = tf.reshape(labels, [tf.shape(labels)[0], -1])\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=logits, labels=labels, name=\"loss\")\n\n return loss",
"def my_softmax_cross_entropy(preds, labels):\n loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=labels)\n # loss = tf.nn.weighted_cross_entropy_with_logits(logits=preds, targets=labels, pos_weight=0.1)\n return tf.reduce_mean(loss)",
"def softmax_cross_entropy(logit, onehot, axis=-1):\n return SoftmaxCrossEntropy(axis).forward(logit, onehot)",
"def my_loss(y_pred,y_true,n_outputs):\n y_true = tf.one_hot(tf.cast(y_true,tf.int64), n_outputs, dtype=tf.float32)\n return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y_true,y_pred))",
"def _softmax(x):\n e = K.exp(x - K.max(x, axis=-1, keepdims=True))\n s = K.sum(e, axis=-1, keepdims=True)\n return e / s",
"def loss(logits, labels):\n labels = tf.to_int64(labels)\n# labels = tf.to_float(labels)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits, name='xentropy')\n# y_conv = tf.nn.softmax(logits)\n# cross_entropy = -tf.reduce_sum(labels*tf.log(y_conv))\n return tf.reduce_mean(cross_entropy, name='xentropy_mean')",
"def softmax_cross_entropy(y, label):\r\n losses = np.sum((- np.log(y + g_epsilon) * label), axis=1)\r\n return losses\r\n pass",
"def soft_cross_entropy(input, targets):\n student_likelihood = torch.nn.functional.log_softmax(input, dim=-1)\n targets_prob = torch.nn.functional.softmax(targets, dim=-1)\n return (- targets_prob * student_likelihood).sum(dim=-1).mean()",
"def nt_xent_loss(y_true, y_pred):\n [x,v] = tf.unstack(y_pred, num=2)\n x = tf.math.l2_normalize(x, -1)\n v = tf.math.l2_normalize(v, -1)\n\n batch_size = tf.shape(x)[0]\n masks = tf.one_hot(tf.range(batch_size), batch_size)\n labels = tf.one_hot(tf.range(batch_size), batch_size * 2)\n\n logits_x_x = tf.matmul(x, x, transpose_b=True) / 0.1\n logits_x_x = logits_x_x - masks * 1e9\n\n logits_v_v = tf.matmul(v, v, transpose_b=True) / 0.1\n logits_v_v = logits_v_v - masks * 1e9\n\n logits_x_v = tf.matmul(x, v, transpose_b=True) / 0.1\n logits_v_x = tf.matmul(v, x, transpose_b=True) / 0.1\n\n loss_x = tf.nn.softmax_cross_entropy_with_logits(\n labels, tf.concat([logits_x_v, logits_x_x], 1))\n loss_v = tf.nn.softmax_cross_entropy_with_logits(\n labels, tf.concat([logits_v_x, logits_v_v], 1))\n\n loss = tf.reduce_mean(loss_x + loss_v)\n\n return loss",
"def softmax_cross_entropy_loss(self, y, y_hat):\n batch_size = y.shape[0]\n return -(y - y_hat) / batch_size",
"def softmax_cross_entropy_loss(logit, labels):\n p = softmax(logit)\n loss_i = - labels * np.log(p + 1e-8)\n return np.mean(loss_i)",
"def cross_entropy_loss():\n return nn.CrossEntropyLoss()",
"def test_aux_softmax_cross_entropy(self):\n loss_op = listwise_losses.AuxiliarySoftmaxCrossEntropy()\n\n y_pred = loss_op.final_activation_op({\n \"logits\": self.logits,\n \"metadata\": {\n \"mask\": self.mask\n }\n })\n\n assert np.isclose(y_pred[0][0].numpy(), 0.19868991, atol=1e-5)\n assert np.isclose(y_pred[2][4].numpy(), 0.0, atol=1e-5)\n\n loss = loss_op({\"mask\": self.mask}, self.y_true_aux, y_pred)\n assert np.isclose(loss, 0.88127804, atol=1e-5)",
"def _softmax(self,x):\n e_x = np.exp(x - np.max(x))\n return np.nan_to_num(e_x / np.nan_to_num(e_x.sum(axis=0)))",
"def softmax_loss(x, y):\n probs = np.exp(x - np.max(x, axis=1, keepdims=True))\n probs /= np.sum(probs, axis=1, keepdims=True)\n N = x.shape[0]\n loss = -np.sum(np.log(probs[np.arange(N), y])) / N\n dx = probs.copy()\n dx[np.arange(N), y] -= 1\n dx /= N\n return loss, dx",
"def convert_softmax_with_cross_entropy(g, op, block):\n\n logits = g.get_node(op.input(\"Logits\")[0])\n labels = g.get_node(op.input(\"Label\")[0])\n ignore_index = op.attr(\"ignore_index\")\n axis = op.attr(\"axis\")\n if axis < 0:\n axis = len(infer_shape(logits)) + axis\n\n softmax = _op.nn.softmax(logits, axis=axis)\n\n g.add_node(op.output(\"Softmax\")[0], softmax)\n\n softmax = _op.log(softmax)\n soft_label = op.attr(\"soft_label\")\n if soft_label:\n loss = _op.sum(-labels * softmax, axis=axis)\n else:\n labels_one = _op.one_hot(\n labels,\n on_value=_expr.const(1.0, dtype=\"float32\"),\n off_value=_expr.const(0.0, dtype=\"float32\"),\n depth=infer_shape(logits)[axis],\n axis=axis + 1,\n dtype=\"float32\",\n )\n labels_one = _op.squeeze(labels_one, axis=axis)\n loss = _op.sum(-labels_one * softmax, axis=axis)\n loss = _op.expand_dims(loss, axis=axis)\n if ignore_index != -100: # noly when soft_label is False\n assert not soft_label, \"soft_label and ignore_index cannot be set at the same time.\"\n ignore_mask = _op.not_equal(labels, _expr.const(ignore_index, dtype=\"int64\"))\n ignore_mask = _op.cast(ignore_mask, \"float32\")\n loss = _op.multiply(loss, ignore_mask)\n\n g.add_node(op.output(\"Loss\")[0], loss)",
"def softmax_loss1(x, y):\n # tmp = np.max(x, axis=1, keepdims=True)\n shifted_logits = x - np.max(x, axis=1, keepdims=True)\n Z = np.sum(np.exp(shifted_logits), axis=1, keepdims=True)\n log_probs = shifted_logits - np.log(Z)\n probs = np.exp(log_probs)\n N = x.shape[0]\n # tmp2 = np.arange(N)\n tmp3 = log_probs[np.arange(N), y]\n # tmp4 = log_probs[[0,1,2],[2,5,0]]\n loss = -np.sum(log_probs[np.arange(N), y]) / N\n dx = probs.copy()\n dx[np.arange(N), y] -= 1\n dx /= N\n return loss, dx",
"def loss_fn(y_true,y_pred): \n loss = tf.nn.softmax_cross_entropy_with_logits_v2(y_true,\n y_pred,\n axis=-1,\n )\n loss = tf.reduce_mean(loss,name=\"loss\")\n return loss",
"def entropy(self, **kwargs) -> TensorType:",
"def entropy(self, **kwargs) -> TensorType:",
"def loss(output, y):\n #Computes softmax cross entropy between logits and labels.\n xentropy = tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=y)\n loss = tf.reduce_mean(xentropy)\n\n return loss",
"def cross_entropy(y_observed, p):\n\n pass",
"def _softmax(self, x):\n return np.exp(x - np.max(x)) / np.sum(np.exp(x - np.max(x)))",
"def softmax_with_cross_entropy(predictions, target_index):\n # TODO: Copy from the previous assignment\n shape = predictions.shape\n probs = softmax(predictions)\n if probs.ndim == 1:\n probs = probs[np.newaxis, :]\n loss = cross_entropy_loss(probs, target_index)\n dprediction = probs.copy()\n dprediction[np.arange(probs.shape[0]), target_index] -= 1\n # Градиент делим на batch_size, так как при численном вычислении усредняем дельту по одной координате\n # Тогда как при аналитическом надо учесть это здесь\n return loss, np.resize(dprediction, shape)/probs.shape[0]",
"def softmax_loss(x, y):\n #raise NotImplementedError\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################\n N=x.shape[0]\n\n \n x-=np.max(x,axis=1,keepdims=True)\n temp=np.exp(x)\n dr_vec=np.sum(temp,axis=1,keepdims=True)\n\n nr=(x[np.arange(N),y]).reshape([N,1])\n loss=np.sum(-(nr)+np.log(dr_vec))\n \n loss=(loss/N)\n temp/=dr_vec\n temp[np.arange(N),y] -= 1\n \n dx = temp/N\n \n return loss, dx",
"def softmax_categorical_crossentropy(y_pred, y_true):\n with tf.name_scope(\"SoftmaxCrossentropy\"):\n return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y_pred,\n y_true))",
"def softmax_cross_entropy(y, y_hat):\n loss = cross_entropy(y, softmax(y_hat))\n\n filter_ = ~tf.math.is_finite(loss)\n replace_ = tf.zeros_like(loss)\n\n return tf.where(filter_, replace_, loss)",
"def softmax(x):\r\n output = np.exp(x)\r\n return output / np.sum(output, axis=1, keepdims=True)"
] |
[
"0.726201",
"0.7222704",
"0.7183105",
"0.7172407",
"0.7157092",
"0.7130719",
"0.7100268",
"0.7088628",
"0.70864207",
"0.7078267",
"0.70732236",
"0.7006741",
"0.7004395",
"0.7000175",
"0.69368124",
"0.6921162",
"0.69102913",
"0.6891329",
"0.6883931",
"0.68837404",
"0.68784714",
"0.68784714",
"0.68772256",
"0.6862228",
"0.6850462",
"0.6829537",
"0.68249416",
"0.6822797",
"0.6813928",
"0.68125826"
] |
0.7691633
|
0
|
Compute the maximal score for a Yahtzee hand according to the upper section of the Yahtzee score card.
|
def score(hand):
    # Upper-section Yahtzee score: for each die value, sum the dice showing that
    # value (count * value) and keep the best box.
    if not hand:
        return 0
    max_score = []
    for dice in hand:
        max_score.append(hand.count(dice) * dice)
    return max(max_score)
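
A couple of worked calls make the upper-section rule concrete; the hands below are chosen here for illustration and are not part of the source.

print(score((2, 2, 3, 5, 5)))   # 10 -> the pair of fives (5 * 2) beats the pair of twos (4) and the lone three
print(score((1, 1, 1, 5, 6)))   # 6  -> a single six outscores three ones (3) and one five (5)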
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def score(hand):\n if (hand==()):\n return 0\n score_board=[0,0,0,0,0,0,0,0,0,0,0,0]\n for dice in hand:\n score_board[dice-1]+=dice\n max_score=max(score_board)\n return max_score",
"def max_score(self):\n return self.raw_possible",
"def highCard(self):\n return max(self)",
"def score(hand):\n max_score = []\n for die in hand:\n max_score.append(hand.count(die) * die)\n return max(max_score)",
"def scoreSevenHand(hand):\n handCombos = list(itertools.combinations(hand, 5))\n return max(scoreFiveHand(hc) for hc in handCombos)",
"def score(hand):\r\n \r\n if not hand:\r\n return 0\r\n \r\n max_score = 0\r\n \r\n for dice in hand:\r\n temp = list(hand).count(dice) * dice\r\n if temp > max_score:\r\n max_score = temp\r\n \r\n return max_score",
"def negamax(self):\n if self.check_winner():\n return 1\n elif self.full():\n return 0\n else:\n bestScore = -10\n for r, c in self.empty_cells():\n self.grid[r][c] = self.player\n self.next_player() \n score = -self.negamax()\n if score > bestScore:\n bestScore = score\n self.grid[r][c] = GameModel.EMPTY\n self.next_player()\n return bestScore",
"def getHighScore(self):\n return max(self.scores)",
"def best_hand(cards):\n return max(generate_all_hands(cards))",
"def max_score(self):\r\n return self.lcp.get_max_score()",
"def personal_best(scores):\n return max(scores)",
"def personal_best(scores):\n# return sorted(scores, reverse=True)[0]\n return max(scores)",
"def score(hand):\n occurrences = [] \n for die in hand:\n if die > len(occurrences):\n occurrences.extend([0 for dummy_idx in range(len(occurrences) ,die)]) \n occurrences[die - 1] += 1\n maxi = 0\n for idx in range(len(occurrences)):\n if (idx+1) * occurrences[idx] > maxi:\n maxi = (idx + 1) * occurrences[idx]\n return maxi",
"def personal_best(scores: list) -> int:\n return max(scores)",
"def max_score(self):\n return max(self._extract_set('score') or [0])",
"def get_high_score(self) -> float:\n return max(self._scores)",
"def score(cards):\n \n values = sorted(map(lambda x: x[0], cards))\n\n if same_suit(cards) and values[0] == 10 and values[4] == 14: # royal flush\n return (10, 14, 0) \n\n if same_suit(cards) and values[4] - values[0] == 4 and len(set(values)) == 5: # straigh flush\n return (9, values[4], 0)\n\n if len(set(values)) == 2 and values[1] == values[3]: # four of a kind\n if values[0] != values[1]:\n high_card = values[0]\n else: high_card = values[4]\n return (8, values[2], high_card)\n\n if len(set(values)) == 2 and values[1] != values[3]: # full house\n return (7, values[2], 0)\n\n if same_suit(cards): # flush\n return (6, values[4], 0)\n\n if values[4] - values[0] == 4 and len(set(values)) == 5: # straight\n return (5, values[4], 0)\n\n if len(set(values)) == 3: # three of a kind or two pair\n # three of a kind\n if values[0] == values[2]:\n return (4, values[0], max(values[3:5]))\n if values[1] == values[3]:\n return (4, values[1], max(values[0], values[4]))\n if values[2] == values[4]: \n return (4, values[2], max(values[0:2]))\n else: # two pair\n return (3, max(values[1], values[3]), dict((values.count(i), i) for i in values)[1])\n\n if len(set(values)) == 4: # one pair\n high_value_card = dict((values.count(i), i) for i in values)[2]\n s = set(values)\n s.remove(high_value_card)\n return (2, high_value_card, max(s))\n\n return (1, values[4], 0)",
"def max_score(self):\n return self.points",
"def get_big_joker_value(deck):\n \n return max(deck)",
"def get_big_joker_value(deck: List[int]) -> int:\n return max(deck)",
"def worst_score(self):\r\n pass",
"def score(hand):\n counted = []\n scores = []\n for element in hand:\n if element not in counted:\n scores.append(hand.count(element)*element)\n counted.append(element)\n return max(scores)",
"def getMaxAlignmentScore(self):\n # get max of each row\n # max_scores = [max(i) for i in self.matrix]\n\n # return the max of the max vaules\n return numpy.max(self.matrix)",
"def max(scores):\n return __builtin__.max(scores) if len(scores) else 0",
"def get_max_score(self):\r\n maxscore = 0\r\n for responder in self.responders.values():\r\n maxscore += responder.get_max_score()\r\n return maxscore",
"def get_big_joker_value(deck_of_cards):\n big_joker_value = max(deck_of_cards)\n return big_joker_value\n # big_joker is the largest card, thus max() function",
"def highest_value():\n maximum_number = 0\n for i in xrange(length):\n challenger = frames[i]\n if abs(challenger) > maximum_number:\n maximum_number = abs(challenger)\n return maximum_number",
"def best_hand(hands):\r\n best_val = 0\r\n sum = 0\r\n hand = None\r\n for h in hands:\r\n for t in h:\r\n sum = sum + t[1]\r\n if sum > best_val:\r\n best_val = sum\r\n hand = h\r\n\r\n return hand",
"def findMaxFactor(self):\n factorMax = 0\n factorMaxInd = ''\n for ue in list(self.ues.keys()):\n if len(self.ues[ue].bearers[0].buffer.pckts)>0 and self.ues[ue].pfFactor>factorMax:\n factorMax = self.ues[ue].pfFactor\n factorMaxInd = ue\n if factorMaxInd=='':\n ue = list(self.ues.keys())[self.ind_u]\n q = 0\n while len(self.ues[ue].bearers[0].buffer.pckts)==0 and q<len(self.ues):\n self.updIndUE()\n ue = list(self.ues.keys())[self.ind_u]\n q = q + 1\n factorMaxInd = ue\n\n return factorMaxInd",
"def pwm_max_score(self):\n if self.max_score is None:\n score = 0\n for row in self.pwm:\n score += log(max(row) / 0.25 + 0.01)\n self.max_score = score\n \n return self.max_score"
] |
[
"0.7198057",
"0.69047964",
"0.6868163",
"0.68407416",
"0.68071705",
"0.67827207",
"0.67775255",
"0.6729456",
"0.6722615",
"0.67012155",
"0.66802895",
"0.66660815",
"0.6656922",
"0.6642546",
"0.6636847",
"0.66343987",
"0.66135156",
"0.6605659",
"0.65514874",
"0.6536933",
"0.65284574",
"0.6485154",
"0.64590675",
"0.6442747",
"0.6436084",
"0.642072",
"0.641884",
"0.63745767",
"0.63730747",
"0.63632774"
] |
0.6956893
|
1
|
Generate all possible choices of dice from hand to hold.
|
def gen_all_holds(hand):
    # Build up every possible hold (subset of the hand, including the empty hold).
    held_dice = [()]
    for dice in hand:
        # The inner loop iterates over the list object bound to held_dice when the
        # loop starts, so rebinding held_dice below does not extend this iteration:
        # each existing hold is extended by the current die exactly once.
        for dummy_dice in held_dice:
            held_dice = held_dice + [tuple(dummy_dice) + (dice, )]
    return set(held_dice)
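
A small illustrative call (the hand is chosen here, not taken from the source) showing that duplicate dice collapse into a set of distinct holds:

holds = gen_all_holds((1, 2, 2))
print(sorted(holds))
# [(), (1,), (1, 2), (1, 2, 2), (2,), (2, 2)]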
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def gen_all_holds(hand):\r\n possible_holds = set([()])\r\n \r\n for dice in hand:\r\n temp_holds = possible_holds.copy()\r\n for hold in temp_holds:\r\n temp_seq = list(hold)\r\n temp_seq.append(dice)\r\n possible_holds.add(tuple(temp_seq))\r\n \r\n return possible_holds",
"def yatzy_dice():\n return [random_die() for _ in range(5)]",
"def dice():\n return random.randrange(1, 7)",
"def reroll_selected_dice(selected_dice, yatzy_dice):\n for die in selected_dice:\n yatzy_dice[die] = random_die()",
"def dealHand(n):\n hand={}\n numVowels = n / 3\n \n for i in range(numVowels):\n x = VOWELS[random.randrange(0,len(VOWELS))]\n hand[x] = hand.get(x, 0) + 1\n \n for i in range(numVowels, n): \n x = CONSONANTS[random.randrange(0,len(CONSONANTS))]\n hand[x] = hand.get(x, 0) + 1\n \n return hand",
"def deal_hand(n):\n hand = {}\n num_vowels = n // 3\n\n for i in range(num_vowels):\n x = VOWELS[random.randrange(0, len(VOWELS))]\n hand[x] = hand.get(x, 0) + 1\n\n for i in range(num_vowels, n):\n x = CONSONANTS[random.randrange(0, len(CONSONANTS))]\n hand[x] = hand.get(x, 0) + 1\n\n return hand",
"def rollDices():\n for i in range(5):\n dices[i] = randint(1, 6)",
"def dealHand(n):\r\n hand={}\r\n numVowels = n // 3\r\n \r\n for i in range(numVowels):\r\n x = VOWELS[random.randrange(0,len(VOWELS))]\r\n hand[x] = hand.get(x, 0) + 1\r\n \r\n for i in range(numVowels, n): \r\n x = CONSONANTS[random.randrange(0,len(CONSONANTS))]\r\n hand[x] = hand.get(x, 0) + 1\r\n \r\n return hand",
"def slot_choke(self):\n if self.choke:\n _choke = [1 for x in range(8)]\n else:\n _choke = [random.randint(0,4) for x in range(8)]\n \n return _choke",
"def roll_dices():\n dices = []\n\n for i in range(DICE_COUNT):\n dice = random.randrange(MIN_DICE, MAX_DICE + 1)\n dices.append(dice)\n\n return dices",
"def get_outcomes(num_die_sides):\n outcomes = []\n\n for value in range(1, num_die_sides + 1):\n outcomes.append(value)\n\n return outcomes\n\n\n \"\"\"\n Iterative function that enumerates the set of all sequences of\n outcomes of given length.\n DO NOT MODIFY.\n\n outcomes: possible values of a roll (ex. -- [1,2,3,4,5,6] for a 6-sided die)\n \"\"\"\n\n answer_set = set([()])\n for dummy_idx in range(length):\n temp_set = set()\n for partial_sequence in answer_set:\n for item in outcomes:\n new_sequence = list(partial_sequence)\n new_sequence.append(item)\n temp_set.add(tuple(new_sequence))\n answer_set = temp_set\n return answer_set",
"def dealHand(n: int) -> d_si:\n hand = {} # type: Dict [str, int]\n numVowels = n // 3\n\n for _ in range(numVowels):\n x = VOWELS[random.randrange(0,len(VOWELS))]\n hand[x] = hand.get(x, 0) + 1\n\n for _ in range(numVowels, n): # Or (n - numVowels)\n x = CONSONANTS[random.randrange(0,len(CONSONANTS))]\n hand[x] = hand.get(x, 0) + 1\n\n return hand",
"def select_hands_for_players(self):\n return [random.choice(h.possible_hands) for h in self.holdem_ranges]",
"def strategy(hand, num_die_sides):\n\n possible_holds = gen_all_holds(hand)\n best_val = 0\n best_score = 0\n dice_to_hold = []\n\n for hold in possible_holds:\n hold_val = expected_value(hold, NUM_DIE_SIDES, NUM_DICE - len(hold))\n\n hand_score = score(hold) + score(hand)\n if hand_score > best_val:\n # best_val = hold_val\n best_score = hand_score\n dice_to_hold = hold\n hand_copy = list(hand)\n sugg_hand = hand_copy.append(dice_to_hold)\n return (hand_score, sugg_hand)",
"def throw_table(n, d=6, type='classical'):\n table = None\n roll = range(1, d+1)\n \n if type == 'classical':\n table = list(itertools.product(roll, repeat=n))\n else:\n table = list(itertools.combinations(roll, n))\n if type == 'bosonic':\n # TODO: This only works for 2 dice!!!!\n for i in roll:\n table.append((i,i))\n\n return table",
"def pick_dice(sorted_dice):\n print(f'\\nYour sorted dice result is: {sorted_dice}')\n player_picks = input(fill('Here is your sorted dice result. Please enter 1-4 unique numbers in the range of 1-5 to'\n ' represent the selection of dice you want to hold. the numbers represents the location '\n 'of die in the dice list from left to right. For example if you want to hold 2 dice that '\n 'are on the left of the sorted dice list, you will enter \"12\". Warning: if you enter '\n 'anything else, the system will treat it as if you choose not to hold any dice: ',\n TXT_WIDTH()))\n dice = [[], []]\n if re.match(r'^(?!.*(.).*\\1)[1-5]{1,4}$', player_picks):\n picks_list = [int(pick) for pick in player_picks]\n index_list = [pick - 1 for pick in picks_list]\n for index in index_list:\n dice[0].append(sorted_dice[index])\n for die in range(TOTAL_NUMBER_OF_DICE() - len(dice[0])):\n dice[1].append(0)\n else:\n for die in sorted_dice:\n dice[1].append(0)\n return dice",
"def roll_dice():\n numbers = ['1', '2', '3', '4', '5', '6']\n return random.choice(numbers)",
"def shuffle_choices(self, choices, rng):\r\n # Separate out a list of the stuff to be shuffled\r\n # vs. the head/tail of fixed==true choices to be held back from the shuffle.\r\n # Rare corner case: A fixed==true choice \"island\" in the middle is lumped in\r\n # with the tail group of fixed choices.\r\n # Slightly tricky one-pass implementation using a state machine\r\n head = []\r\n middle = [] # only this one gets shuffled\r\n tail = []\r\n at_head = True\r\n for choice in choices:\r\n if at_head and choice.get('fixed') == 'true':\r\n head.append(choice)\r\n continue\r\n at_head = False\r\n if choice.get('fixed') == 'true':\r\n tail.append(choice)\r\n else:\r\n middle.append(choice)\r\n rng.shuffle(middle)\r\n return head + middle + tail",
"def dice(name):",
"def throw_dice():\n dice_1 = random.randrange(1,7)\n dice_2 = random.randrange(1,7)\n return sorted((dice_1,dice_2))",
"def wyldingHand(self, level):\n if level == 0:\n die_result = random.randint(1,6)\n elif level == 1:\n die_result = random.randint(1,10)\n elif level == 2:\n die_result = random.randint(1,6) + random.randint(1,6)\n elif level == 3:\n die_result = random.randint(1,8) + random.randint(1,8)\n\n return die_result",
"def gen_all_holds(hand):\n \n answer_set = set([()])\n for dummy_idx in range(len(hand)):\n temp_set = set()\n for partial_sequence in answer_set:\n for item in range(1,len(hand)+1):\n new_sequence = list(partial_sequence)\n new_sequence.append(item)\n if set(tuple(new_sequence)).issubset(set(range(1,len(hand)+1))):\n temp_set.add(tuple(set(new_sequence)))\n answer_set = answer_set.union(temp_set)\n answer_set2 = set([()])\n for seq in answer_set:\n temp_seq = []\n for element in seq: \n temp_el = hand[element -1]\n temp_seq.append(temp_el)\n answer_set2.add(tuple(temp_seq))\n return answer_set2",
"def gen_all_holds(hand):\n without_repeat = []\n mask_seque = list(gen_all_sequences([0,1], len(hand)))\n for dum_i in mask_seque:\n without_repeat.append(())\n \n for dum_i in range(len(mask_seque)):\n for dum_j in range(len(mask_seque[dum_i])):\n if (mask_seque[dum_i][dum_j]==1):\n without_repeat[dum_i]=list(without_repeat[dum_i])\n without_repeat[dum_i].append(hand[dum_j])\n without_repeat[dum_i]=tuple(without_repeat[dum_i])\n \n without_repeat = set(tuple(without_repeat))\n return without_repeat",
"def picksomechoices(question, answer):\n \"\"\" because of the way dict() works all 4 choices will be unique \"\"\"\n choices = dict()\n choices[question] = answer\n for choice in random.sample(nlist, 10):\n choices[choice[0]] = choice[1]\n if len(choices.keys()) > 3:\n break\n\n return choices",
"def picksomechoices(question, answer):\n \"\"\" because of the way dict() works all 4 choices will be unique \"\"\"\n choices = dict()\n choices[question] = answer\n for choice in random.sample(nlist, 10):\n choices[choice[0]] = choice[1]\n if len(choices.keys()) > 3:\n break\n\n return choices",
"def picksomechoices(question, answer):\n \"\"\" because of the way dict() works all 4 choices will be unique \"\"\"\n choices = dict()\n choices[question] = answer\n for choice in random.sample(nlist, 10):\n choices[choice[0]] = choice[1]\n if len(choices.keys()) > 3:\n break\n\n return choices",
"def picksomechoices(question, answer):\n \"\"\" because of the way dict() works all 4 choices will be unique \"\"\"\n choices = dict()\n choices[question] = answer\n for choice in random.sample(nlist, 10):\n choices[choice[0]] = choice[1]\n if len(choices.keys()) > 3:\n break\n\n return choices",
"def rollDie():\n return random.choice([1, 2, 3, 4, 5, 6])",
"def strategy(hand, num_die_sides):\n all_holds = list(gen_all_holds(hand))\n expect=[]\n for held_dice in all_holds:\n expect.append(expected_value(held_dice, num_die_sides, len(hand)-len(held_dice)))\n max_expect_index = expect.index(max(expect))\n return (max(expect), (all_holds[max_expect_index]))",
"def determine_roll(self):\n dice_to_roll = []\n to_roll = input(\"Roll dice: \")\n if 'a' in to_roll:\n dice_to_roll.append(self.die_a)\n\n if 'b' in to_roll:\n dice_to_roll.append(self.die_b)\n\n return dice_to_roll"
] |
[
"0.66850966",
"0.6451011",
"0.6337711",
"0.63297033",
"0.63039804",
"0.6226844",
"0.6226421",
"0.62156814",
"0.6166249",
"0.61634064",
"0.61588705",
"0.61474586",
"0.6074418",
"0.60378057",
"0.6029834",
"0.60106313",
"0.598691",
"0.59858495",
"0.594391",
"0.59296936",
"0.5920476",
"0.59076583",
"0.58997256",
"0.589188",
"0.589188",
"0.589188",
"0.589188",
"0.5879852",
"0.5872513",
"0.58707523"
] |
0.7070763
|
0
|
Compute the hold that maximizes the expected value when the discarded dice are rolled.
|
def strategy(hand, num_die_sides):
    # Evaluate every possible hold and keep the one with the highest expected value.
    best_hold = (0.0, ())
    best_value = 0.0
    for held_dice in gen_all_holds(hand):
        # Expected score when the dice that are not held get rerolled.
        value = expected_value(held_dice, num_die_sides, len(hand) - len(held_dice))
        if value > best_value:
            best_value = value
            best_hold = (best_value, held_dice)
    return best_hold
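
strategy relies on gen_all_holds, score, and an expected_value helper that is not shown in this record. The sketch below is an assumed, brute-force expected_value consistent with the variants listed among the negatives, followed by a hypothetical call; the helper name and the example hand are illustrative only.

from itertools import product

def expected_value(held_dice, num_die_sides, num_free_dice):
    # Assumed helper: average score over every equally likely roll of the free dice,
    # with the held dice kept fixed.
    outcomes = range(1, num_die_sides + 1)
    rolls = list(product(outcomes, repeat=num_free_dice))
    return sum(score(held_dice + roll) for roll in rolls) / float(len(rolls))

# With score() and gen_all_holds() from above in scope:
print(strategy((1, 2, 5, 5, 5), 6))   # expected to favour holding the three fives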
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def expected_value(held_dice, num_die_sides, num_free_dice):\n outcomes = get_outcomes(num_die_sides)\n print \"outcomes:\", outcomes\n\n # generate all possible sequences of rolls\n all_rolls = list(gen_all_sequences(outcomes, num_free_dice))\n results = [max_repeats(roll) for roll in all_rolls]\n value = 0.0 \n\n\n for result in all_rolls:\n curr_hand = tuple(list(held_dice) + list(result))\n value += score(curr_hand)\n\n return value / len(all_rolls)",
"def strategy(hand, num_die_sides):\n all_holds = gen_all_holds(hand)\n expected_values = {}\n for hold in all_holds:\n num_free_dice = len(hand) - len(hold)\n current_expexted_value = expected_value(hold, num_die_sides, num_free_dice)\n expected_values[current_expexted_value] = hold\n\n max_value = max(expected_values.keys())\n return tuple((max_value, expected_values[max_value]))",
"def strategy(hand, num_die_sides):\n all_holds = list(gen_all_holds(hand))\n expect=[]\n for held_dice in all_holds:\n expect.append(expected_value(held_dice, num_die_sides, len(hand)-len(held_dice)))\n max_expect_index = expect.index(max(expect))\n return (max(expect), (all_holds[max_expect_index]))",
"def strategy(hand, num_die_sides):\r\n \r\n best_value = 0.0\r\n best_hold = ()\r\n \r\n possible_holds = gen_all_holds(hand)\r\n \r\n for hold in possible_holds:\r\n current_value = expected_value(hold, num_die_sides, len(hand) - len(hold))\r\n if current_value > best_value:\r\n best_value = current_value\r\n best_hold = hold\r\n \r\n return (best_value, best_hold)",
"def strategy(hand, num_die_sides):\n #return (0.0, ())\n maxval = 0.0\n maxseq= ()\n allholds = gen_all_holds(hand)\n for seq in allholds:\n val = expected_value(seq, num_die_sides, len(hand)-len(seq))\n if val > maxval:\n maxval = val\n maxseq = seq\n \n \n \n return (maxval, maxseq)",
"def strategy(hand, num_die_sides):\n result = (0.0, ())\n current_value = float('-inf')\n \n for item in gen_all_holds(hand):\n value = expected_value(item, num_die_sides, len(hand) - len(item))\n if value > current_value:\n current_value = value\n result = (current_value, item)\n \n return result",
"def expected_value(held_dice, num_die_sides, num_free_dice):\n result = 0\n outcomes = range(1, num_die_sides + 1)\n possible = sorted(gen_all_sequences(outcomes, num_free_dice))\n for hand in possible:\n result += score(held_dice + hand)\n return float(result)/len(possible)",
"def strategy(hand, num_die_sides):\n best_move = (0.0, ())\n all_holds = gen_all_holds(hand)\n for hold in all_holds:\n # hand can be less than 5\n num_free_dice = len(hand) - len(hold)\n expected = expected_value(hold, num_die_sides, num_free_dice)\n if expected > best_move[0]:\n best_move = (expected, hold)\n return best_move",
"def expected_value(held_dice, num_die_sides, num_free_dice):\n\n outcome = ()\n for die in range(1, num_die_sides + 1):\n outcome +=(die, )\n possible_outcomes = gen_all_sequences(outcome, num_free_dice)\n output = 0\n for single_output in possible_outcomes:\n current_score = score(single_output + held_dice)\n output += current_score\n\n return output/(len(possible_outcomes)*1.0)",
"def expected_value(held_dice, num_die_sides, num_free_dice):\n all_sequences = gen_all_sequences(range(1,num_die_sides+1), num_free_dice)\n iter_seque=[]\n score_seque=[]\n for seq in all_sequences:\n iter_seque.append(list(seq)+list(held_dice))\n score_seque.append(score(iter_seque[-1]))\n return float(sum(score_seque))/float(len(score_seque))",
"def expected_value(held_dice, num_die_sides, num_free_dice):\r\n die_outcomes = set(range(1, num_die_sides + 1))\r\n \r\n possible_sequences = gen_all_sequences(die_outcomes, num_free_dice)\r\n \r\n total_score = 0.0\r\n for sequence in possible_sequences:\r\n total_score += score(held_dice + sequence)\r\n \r\n return float(total_score / len(possible_sequences))",
"def expected_value(held_dice, num_die_sides, num_free_dice):\n list_scores = []\n die_sides = [die for die in range(1, num_die_sides + 1)]\n possible_seq = gen_all_sequences(die_sides, num_free_dice)\n for item in possible_seq:\n list_scores.append(score(held_dice + item))\n \n return float(sum(list_scores)) / len(list_scores)",
"def maximum_roll(self):\n if self.dice_array is None:\n return self.number * self.sides\n else:\n return np.sum(self.dice_array)",
"def strategy(hand, num_die_sides):\n\n possible_holds = gen_all_holds(hand)\n best_val = 0\n best_score = 0\n dice_to_hold = []\n\n for hold in possible_holds:\n hold_val = expected_value(hold, NUM_DIE_SIDES, NUM_DICE - len(hold))\n\n hand_score = score(hold) + score(hand)\n if hand_score > best_val:\n # best_val = hold_val\n best_score = hand_score\n dice_to_hold = hold\n hand_copy = list(hand)\n sugg_hand = hand_copy.append(dice_to_hold)\n return (hand_score, sugg_hand)",
"def expected_value(held_dice, num_die_sides, num_free_dice):\n outcomes = [number+1 for number in range(num_die_sides)]\n die_seqs = list(gen_all_sequences(outcomes, num_free_dice))\n for idx in range(len(die_seqs)):\n seq = list(die_seqs[idx])\n seq.extend(list(held_dice))\n die_seqs[idx] = tuple(seq)\n scr = 0.0\n for seq in die_seqs:\n scr += score(seq) \n return scr / len(die_seqs)",
"def expected_value(held_dice, num_die_sides, num_free_dice):\r\n \r\n scores = []\r\n \r\n die_sides = [(die + 1) for die in range(num_die_sides)]\r\n \r\n pos_outcomes = gen_all_sequences(die_sides, num_free_dice)\r\n\r\n for outcome in pos_outcomes:\r\n scores.append(score(held_dice + outcome))\r\n \r\n expected_result = float(sum(scores))/len(scores)\r\n \r\n return expected_result",
"def max_scoring_num_rolls(dice=six_sided, num_samples=1000):\n # BEGIN PROBLEM 8\n\n \"\"\"maxi, number_of_dice, ret = 0, 10, 0\n while number_of_dice > 0:\n avg = make_averaged(roll_dice)(number_of_dice, dice)\n maxi = max(maxi, avg)\n if avg >= maxi:\n ret = number_of_dice\n number_of_dice -= 1\n return ret\"\"\"\n\n\n\n counterA = 1\n num_rolls=1\n max_value = 0\n best_num_rolls = 0\n while counterA <= 10:\n num_rolls = counterA\n average_function = make_averaged(roll_dice)(counterA, dice)\n if average_function > max_value:\n max_value = average_function\n best_num_rolls = counterA\n counterA +=1\n return best_num_rolls\n\n \"\"\"counterA = 1\n maxvalue = 0\n maxvaluenumber = 0\n while(counterA<=10):\n num_rolls = counterA\n average_for_roll = make_averaged(roll_dice(num_rolls, dice), num_samples)\n counterB = average_for_roll(roll_dice(counterA, dice))\n if(counterB>maxvalue):\n maxvalue = counterB\n maxvaluenumber = counterA\n counterA +=1\n return maxvaluenumber\"\"\"\n # END PROBLEM 8",
"def roll_die(sides = 6, maxi = 6):\n d = 1000\n # discard highest roll(s)\n while d > maxi:\n d = random.randint(1,sides)\n return d",
"def roll(self):\n self.currentValue = choice(self.possibleValues)\n self.value = AngryDie.ANGRY_VALUES[self.currentValue]\n return self.currentValue",
"def big_straight(dice):\n if sorted(dice) == [2, 3, 4, 5, 6]:\n return sum(dice)\n return 0",
"def _calc_hp(self, average=False):\n dice = self.hd + self.constitution\n if average:\n return round((dice * self.level).average)\n\n return max(sum((dice * self.level).roll()), 1)",
"def score(hand):\r\n \r\n if not hand:\r\n return 0\r\n \r\n max_score = 0\r\n \r\n for dice in hand:\r\n temp = list(hand).count(dice) * dice\r\n if temp > max_score:\r\n max_score = temp\r\n \r\n return max_score",
"def roll_die(number_of_rolls: int, number_of_sides: int) -> int:\r\n if number_of_rolls <= 0 or number_of_sides <= 0:\r\n return 0\r\n\r\n max_total = number_of_sides * number_of_rolls\r\n\r\n return random.randint(number_of_rolls, max_total)",
"def roll_dice(roll, modifiers):\n try:\n if modifiers[\"Advantage\"] and not modifiers[\"Disadvantage\"]:\n modifiers[\"Advantage\"] = False\n return max(roll_dice(roll, modifiers), roll_dice(roll,modifiers))\n if modifiers[\"Disadvantage\"] and not modifiers[\"Advantage\"]:\n modifiers[\"Disadvantage\"] = False\n return min(roll_dice(roll, modifiers), roll_dice(roll, modifiers))\n num_dice = int(roll.split(\"D\")[0])\n if modifiers[\"Critical\"]:\n num_dice*=2\n num_dice+=modifiers[\"Brutal\"]\n die_type = roll.split(\"D\")[1]\n if die_type[0] == \"4\" or die_type[0] == \"6\" or die_type[0] == \"8\":\n die_type = int(die_type[0])\n elif die_type[:3] == \"100\" or die_type[0] == \"%\":\n die_type = 100\n elif die_type[:2] == \"10\" or die_type[:2] == \"12\" or die_type[:2] == \"20\":\n die_type = int(die_type[:2])\n else:\n die_type = 6\n roll_total = 0\n critical_success = False\n critical_failure = False\n for die in range(num_dice):\n die_result = random.randint(1,die_type)\n if die_result == 1 and modifiers[\"Lucky\"] or die_result <= 2 and modifiers[\"Great Weapon\"]:\n die_result = random.randint(1,die_type)\n if die_result < modifiers[\"Minimum Roll\"]:\n die_result = modifiers[\"Minimum Roll\"]\n if die_result == 20 and die_type == 20:\n critical_success = True\n if die_result == 1 and die_type == 20:\n critical_failure = True\n roll_total += die_result\n return roll_total\n except ValueError:\n return \"Error\"",
"def roll(self):\n roll = random.random()\n sum = 0\n for item in self.mask:\n sum += item.prob\n if sum >= roll: return item.elem\n return None",
"def max_scoring_num_rolls(dice=six_sided, num_samples=1000):\n # BEGIN PROBLEM 9\n \"*** YOUR CODE HERE ***\"\n k, max_value, max_num = 1, 0, 0\n roll = make_averaged(roll_dice, num_samples)\n while k <= 10:\n current_value = roll(k, dice)\n #print('k: ' + str(k) + ' current_value: ' + str(current_value))\n if current_value > max_value:\n max_value, max_num = current_value, k\n k += 1\n return max_num\n # END PROBLEM 9",
"def temporary_score(self, dice_roll):\n\n temporary_score = 0\n if dice_roll > 1:\n temporary_score += dice_roll\n else:\n temporary_score = 0\n return temporary_score",
"def roll(self):\n return cbrandom.throwDices(\"1d20\")",
"def chance(dice):\n return sum(dice)",
"def remaining_space_in_hold(self):\n balls = self.config['balls_to_hold'] - self.balls_held\n if balls < 0:\n balls = 0\n return balls"
] |
[
"0.7388774",
"0.70694953",
"0.69726455",
"0.696321",
"0.69587135",
"0.6900279",
"0.6876599",
"0.6836615",
"0.67820185",
"0.67149234",
"0.67129946",
"0.6587366",
"0.65404105",
"0.6535527",
"0.65227497",
"0.6499015",
"0.62842536",
"0.6144699",
"0.60962355",
"0.6076315",
"0.5911923",
"0.58954316",
"0.5892068",
"0.58653563",
"0.5827538",
"0.5795526",
"0.57942176",
"0.5789509",
"0.57706076",
"0.57664585"
] |
0.7348299
|
1
|
Find names in a sentence based on a FIRST_NAMES file
|
def find_names(sentence=None, last_names_enabled=True, no_names_enabled=False):
    if not sentence:
        raise ParameterMissing("This method requires sentence as input")
    if not isinstance(sentence, str):
        raise TypeError("This method requires string as input")
    first_names = get_first_names_pack()
    if not first_names:
        raise VariableNotSet("Variable FIRST_NAMES is not set in settings.py")
    if last_names_enabled:
        last_names = get_last_names_pack()
        if not last_names:
            raise VariableNotSet("Variable LAST_NAMES is not set in settings.py")
        first_names = list(set(first_names).union(set(last_names)))
    if no_names_enabled:
        no_names = get_no_names_pack()
        if not no_names:
            raise VariableNotSet("Variable NO_NAMES is not set in settings.py")
        first_names = list(set(first_names).difference(set(no_names)))
    # Strip punctuation so tokens match the entries in the name packs.
    punctuation = '!@#$%^&*()_+<>?:.,;'
    for char in sentence:
        if char in punctuation:
            sentence = sentence.replace(char, " ")
    # Case-insensitive match against the combined name pack, then title-case the hits.
    words = sentence.lower().split()
    res = set(words).intersection(first_names)
    to_return = [w.title() for w in res]
    return to_return
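
The pack loaders come from the project's settings, so a fully runnable call needs stand-ins. The stubs below are hypothetical, defined only to show the expected flow; the pack contents and sample sentences are mine, not from the source.

def get_first_names_pack():
    return ["john", "mary", "amanda"]

def get_last_names_pack():
    return ["smith", "jones"]

print(find_names("Hi, my name is John Smith!"))                        # ['John', 'Smith'] (set order may vary)
print(find_names("mary called yesterday", last_names_enabled=False))   # ['Mary']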
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def find_names(text):\n\n names = []\n\n # spacy doc\n doc = nlp(text)\n\n # pattern\n pattern = [{'LOWER': 'prime'},\n {'LOWER': 'minister'},\n {'POS': 'ADP', 'OP': '?'},\n {'POS': 'PROPN'}]\n\n # Matcher class object\n matcher = Matcher(nlp.vocab)\n matcher.add(\"names\", None, pattern)\n\n matches = matcher(doc)\n\n # finding patterns in the text\n\n for i in range(0, len(matches)):\n\n # match: id, start, end\n token = doc[matches[i][1]:matches[i][2]]\n # append token to list\n names.append(str(token))\n\n # Only keep sentences containing Indian PMs\n\n for name in names:\n if (name.split()[2] == 'of') and (name.split()[3] != \"India\"):\n names.remove(name)\n\n return names",
"def find_names_position(sentence=None, last_names_enabled=True, no_names_enabled=False):\n if not sentence:\n raise Exception(ParameterMissing, \"This method requires sentence as input\")\n\n if not isinstance(sentence, str):\n raise Exception(TypeError, \"This method requires string as input\")\n\n names_found = find_names(sentence, last_names_enabled=last_names_enabled, no_names_enabled=no_names_enabled)\n\n to_return = []\n for name in names_found:\n begin_positions = [m.start() for m in re.finditer(name, sentence)]\n for begin in begin_positions:\n to_return.append((begin, begin + len(name)))\n # begin = sentence.lower().index(name.lower())\n # end = begin + len(name)\n # to_return.append((begin, end))\n\n return to_return",
"def fetch_candidate_name(self):\r\n # variable to save possible matches\r\n possible_names = []\r\n\r\n # source text is input document in text format\r\n nlp_text = self.doc # := nlp(self.stringtext)\r\n\r\n # Add patterns to match proper names\r\n patterns = [[{'POS': 'PROPN'}]]\r\n self.matcher.add('NAME', patterns) \r\n matches = self.matcher(nlp_text) \r\n\r\n # fetch the matches\r\n for match_id, start, end in matches:\r\n span = nlp_text[start:end] \r\n possible_names += [span.text] \r\n if len(possible_names) >= 2: \r\n break\r\n\r\n # Extract candidates\r\n doc_entities = self.doc.ents\r\n\r\n # Subset to person type entities\r\n doc_persons = filter(lambda x: x.label_ == 'PERSON', doc_entities)\r\n doc_persons = filter(lambda x: len(\r\n x.text.strip().split()) >= 2, doc_persons)\r\n doc_persons = map(lambda x: x.text.strip(), doc_persons)\r\n doc_persons = list(doc_persons)\r\n\r\n # Assume the first Person entity with more than two tokens is the candidate's name\r\n if len(doc_persons) > 0:\r\n return possible_names + [doc_persons[0]]\r\n\r\n return \"NOT FOUND\"",
"def get_names(lines): \n next = False \n names = []\n for line in lines:\n if next:\n if len(line) == 1:\n break\n else:\n tmp = line.split()\n names.append(tmp[1])\n if line.startswith('Sequences loaded ...'):\n next = True\n return names",
"def find_names(s):\n \"*** YOUR CODE HERE ***\"",
"def process_names():\n with open(input_names_file, 'r') as data:\n plaintext = data.read()\n name_array = plaintext.split('\\n')\n\n # Final name list\n final_name_list = []\n\n # Parsing different name formats and standardizing to create csv\n for name in name_array:\n if len(name.split(',')) == 2:\n temp_name_list = re.split(reg_ex, name)\n last_name = temp_name_list.pop()\n first_name = temp_name_list.pop()\n final_name_list.append(last_name + ',' + first_name)\n elif len(name.split(' ')) == 2:\n final_name_list.append(name.replace(' ', ','))\n elif len(name.split(' ')) == 3:\n temp_name_list = re.split(' ', name)\n last_name = temp_name_list.pop()\n middle_name = temp_name_list.pop()\n first_name = temp_name_list.pop()\n final_name_list.append(first_name + ',' + middle_name + ' ' + last_name)\n else:\n final_name_list.append(name)\n\n # Writing final name list to a file\n with open(output_names_file, \"w\") as txt_file:\n txt_file.write(\"first_name,last_name\" + \"\\n\")\n for name in final_name_list:\n txt_file.write(name + \"\\n\") # works with any number of elements in a line\n\n names_df = pd.read_csv(output_names_file, names=name_header, sep=',', engine='python')",
"def shortest_first_name(names):\n names = dedup_and_title_case_names(names)\n # ...",
"def match_names(s):\n result = re.findall(r'^[A-Z][a-z]+','name: Bob, age: 14, name: Amanda, age: 17, name: Tim, age: 30')\n print result",
"def sample_first_name(first_name_file, num_samples):\n\n df = pd.read_csv(first_name_file, header=None)\n df.columns = [\"name\", \"gender\", \"count\"]\n df = df[(df[\"count\"] > 10)]\n names = df[\"name\"].sample(n=num_samples, random_state=2021, replace=True).apply(str.title)\n\n return list(names.values)",
"def getName(sentence): #Jasper, Suraj\n userWords = sentence.lower()\n userWords = userWords.split()\n \n # ways of introduction:\n # \"Hello, my name is ___\"\n # \"Hi, I'm ____\"\n # \"Howdy, I'm called ____\"\n # Order: Greeting -> pronoun -> Name -> question (optional)\n # eg. \"Hello, I'm Jasper. How are you?\"\n\n if (userWords[0] in greetings): #the added code that stops iam from being added into the name if 2 greeting are added\n userWords.pop(0) #pop and not .remove because\n \n \n if (userWords[0] == \"i\" and len(userWords) > 1):\n if (userWords[1] in [\"m\",\"am\"]):\n userWords.insert(0, \" \".join(userWords[0:2]))\n userWords.pop(2)\n userWords.pop(1)\n \n userName = \"\"\n for userWord in userWords: #iterate throught the user's words\n foundWord = False #sets True when there's a similar word in the other list\n for word in greetings: #iterates and compares the chosen word from the user's list of words to the words list\n if userWord == word and foundWord == False:\n foundWord = True\n if foundWord == False:\n userName = userName + userWord + \" \"\n return userName #this is the found name",
"def _get_names(self):\n if len(self.firstnames):\n return self.firstnames, self.lastnames\n\n if os.path.exists(\"/code/api/app/utils/names.txt\"):\n with open(\"/code/api/app/utils/names.txt\") as file_with_names:\n names = file_with_names.readlines()\n else:\n # why yes, these are names of African Hollywood actors (according to Wikipedia)\n names = [\"Mehcad Brooks\", \"Malcolm Barrett\", \"Nick Cannon\", \"Lamorne Morris\", \"Neil Brown Jr.\",\n \"William Jackson Harper\", \"Marques Houston\", \"Jennifer Hudson\", \"Alicia Keys\", \"Meghan Markle\",\n \"Beyonce Knowles\", \"Jesse Williams\", \"Lance Gross\", \"Hosea Chanchez\", \"Daveed Diggs\",\n \"Damon Wayans Jr.\", \"Columbus Short\", \"Terrence Jenkins\", \"Ron Funches\", \"Jussie Smollett\",\n \"Donald Glover\", \"Brian Tyree Henry\", \"Gabourey Sidibe\", \"Trai Byers\", \"Robert Ri'chard\",\n \"Arjay Smith\", \"Tessa Thompson\", \"J.Lee\", \"Lauren London\", \"DeVaughn Nixon\", \"Rob Brown\", ]\n for _name in names:\n split_name = _name.strip().split(\" \")\n self.firstnames.append(split_name[0])\n lastname = \" \".join(split_name[1:]) if len(split_name) > 1 else \"\"\n self.lastnames.append(lastname)\n return self.firstnames, self.lastnames",
"def process_name(name):\n def getnames_form3(a):\n \"\"\"\n Case with two commas: the name is of the format\n von Last, Jr, First\n like in: von Hicks, III, Michael\n \"\"\"\n full_last = a[0].strip()\n full_first = a[2].strip()\n junior = a[1].strip()\n von, last = get_vonlast(full_last)\n return [von.strip(), last.strip(), full_first.strip(), junior.strip()]\n\n def getnames_form2(a):\n \"\"\"\n Case with one comma: the name is of the format\n von Last, First\n like in: von Hicks, Michael\n \"\"\"\n full_last = a[0].strip()\n full_first = a[1].strip()\n junior = ''\n von, last = get_vonlast(full_last)\n return [von.strip(), last.strip(), full_first.strip(), junior]\n\n def getnames_form1(a):\n \"\"\"\n Case with NO commas: the name is of the format\n First von Last\n like in: Michael von Hicks\n \"\"\"\n last = a[0].split(' ')\n nfn = 0\n for l in last:\n if l != \"\" and not l[0].islower():\n nfn += 1\n else:\n break\n if nfn == len(last):\n nfn = -1\n\n full_first = ' '.join(last[:nfn])\n full_first = full_first.replace('.', ' ')\n full_last = ' '.join(last[nfn:])\n junior = \" \"\n von, last = get_vonlast(full_last)\n return [von.strip(), last.strip(), full_first.strip(), junior.strip()]\n\n def get_vonlast(full_last):\n von = \"\"\n last = \"\"\n\n for l in full_last.split(' '):\n if len(l) > 0 and l[0].islower():\n von += l.lower() + \" \"\n else:\n last += l + \" \"\n return von, last\n\n # Start the processing\n a = name.split(',')\n if len(a) == 3:\n fullname = getnames_form3(a)\n elif len(a) == 2:\n fullname = getnames_form2(a)\n elif len(a) == 1:\n fullname = getnames_form1(a)\n else:\n fullname = []\n\n return fullname",
"def extract_names(pages: Iterable[tuple[int, list[str]]]) -> DataT:\n found_first = False\n current_name: dict[str, Any] | None = None\n current_label: str | None = None\n current_lines: list[str] = []\n in_headings = True\n\n def start_label(label: str, line: str) -> None:\n nonlocal current_label, current_lines\n assert current_name is not None\n assert current_label is not None\n if label in current_name:\n if label in (\"Syntype\", \"Type Locality\"):\n label = f\"Syntype {line}\"\n assert (\n label not in current_name\n ), f\"duplicate label {label} in {current_name}\"\n current_name[current_label] = current_lines\n current_label = label\n current_lines = [line]\n\n for page, lines in pages:\n if current_name is not None:\n current_name[\"pages\"].append(page)\n for line in lines:\n if not found_first:\n if line.strip() in (\"TYPE SPECIMENS\", \"SPECIMENS\"):\n found_first = True\n continue\n # ignore family/genus headers\n if re.match(\n (\n r\"^\\s*(Genus|Family|Subfamily|Suborder|Order) [A-Z][a-zA-Z]+\"\n r\" [a-zA-Z\\.’, \\-]+(, \\d{4})?$\"\n ),\n line,\n ):\n in_headings = True\n continue\n # ignore blank lines\n if not line:\n continue\n if in_headings:\n if line.startswith(\" \"):\n continue\n else:\n in_headings = False\n if line.startswith(\" \"):\n current_lines.append(line)\n elif re.match(r\"^[A-Z][A-Z a-z-]+: \", line):\n start_label(line.split(\":\")[0], line)\n elif line.startswith(\"Lectotype as designated\"):\n start_label(\"Lectotype\", line)\n elif line.startswith(\"Neotype as designated\"):\n start_label(\"Neotype\", line)\n elif line.startswith(\n (\n \"This specimen\",\n \"Type \",\n \"No type\",\n \"There are\",\n \"No additional\",\n \"All \",\n \"Subspecies of \",\n \"Neotype designated \",\n \"Padre Island\",\n )\n ):\n start_label(\"comments\", line)\n elif line.startswith(\n (\"Secondary junior\", \"Primary junior\", \"Junior primary\")\n ):\n start_label(\"homonymy\", line)\n elif re.match(r\"^[\\d/]+\\. \", line):\n start_label(line.split(\".\")[0], line)\n elif line.startswith(\"USNM\"):\n start_label(line.split(\".\")[0], line)\n elif (\n current_label not in (\"name\", \"verbatim_citation\", \"homonymy\")\n and \":\" not in line\n ):\n # new name\n if current_name is not None:\n assert current_label is not None\n current_name[current_label] = current_lines\n assert any(\n field in current_name\n for field in (\n \"Holotype\",\n \"Type Locality\",\n \"Lectotype\",\n \"Syntype\",\n \"Syntypes\",\n \"No name-bearing status\",\n \"Neotype\",\n )\n ), current_name\n yield current_name\n current_name = {\"pages\": [page]}\n current_label = \"name\"\n current_lines = [line]\n elif current_label == \"name\":\n if re.search(\n r\"\\d|\\b[A-Z][a-z]+\\.|\\baus\\b|\\bDas\\b|\\bPreliminary\\b|\\., \", line\n ):\n start_label(\"verbatim_citation\", line)\n else:\n # probably continuation of the author\n current_lines.append(line)\n elif (\n current_label == \"verbatim_citation\"\n or current_label == \"homonymy\"\n or line.startswith(\"= \")\n ):\n start_label(\"synonymy\", line)\n else:\n assert False, f\"{line!r} with label {current_label}\"\n assert current_label is not None\n assert current_name is not None\n current_name[current_label] = current_lines\n yield current_name",
"def get_surnames(filename):\n result = []\n with open(filename, \"r\") as file:\n for line in file.readlines():\n surname = line.split('\\t')[1]\n result.append(surname)\n return result",
"def match_name(sentence):\n if \"WIFE\" in sentence:\n return \"WIFE\"\n elif \"MAHAVIR\" in sentence or \"FATHER\" in sentence or \"SINGH\" in sentence: \n return \"MAHAVIR\"\n elif \"TEENAGER\" in sentence:\n return \"TEENAGER\"\n elif \"GIRL\" in sentence or \"WOMAN\" in sentence: \n return \"WOMAN\"\n elif \"GUY\" in sentence or \"MAN\" in sentence or \"BROTHER\" in sentence: \n return \"MAN\"\n elif \"COACH\" in sentence:\n return \"COACH\"\n elif \"COMMENT\" in sentence:\n return \"COMMENTATOR\"\n elif sentence[-2:] == \"ER\" or sentence[-3:] == \"IAN\" or sentence[-2:] == \"OR\" or sentence[-1:] == \"D\":\n return \"MISC\"\n \n return sentence",
"def first_words_func():\n return_list = []\n for lyric in lyrics:\n for line in lyric.split(\"\\n\"):\n return_list.append(line.split(\" \")[0])\n return (return_list)",
"def read_names(male_names_file_path, female_names_file_path):\n\n names = set()\n\n with open(male_names_file_path, \"r\") as f1:\n for name in f1:\n names.add(name.strip().lower())\n\n with open(female_names_file_path, \"r\") as f2:\n for name in f2:\n names.add(name.strip().lower())\n\n return names",
"def find_pseudonyms(original_name, gender, topk):\n firstnames = load_firstnames(gender)\n model = load_model()\n whitelist = LetterBag(slugify.slugify(\n WORD_SPLIT_PATTERN.sub(\"\", original_name)))\n for firstname in firstnames:\n if not whitelist.includes(firstname):\n continue\n for lastname, proba in generate_word(model, whitelist.sub(firstname), topk):\n yield firstname.surface, lastname, proba",
"def match_name(pattern, rows):\n matching = []\n for row in rows:\n # Use regex matching to check whether first name or last name contains the pattern\n if re.search(r'%s' % pattern.lower(), row[0].lower()) != None or re.search(r'%s' % pattern.lower(), row[1].lower()) != None:\n matching.append(row)\n\n # print the matched records\n print_records(matching)",
"def load_firstnames(gender):\n return load_resource(\"resources/%s.txt\" % gender)",
"def _first_name_sql(self, first_name, tolerance=1):\n nicknames = self._lookup_name(first_name)\n first_name_selects = []\n first_name_conditions = []\n for i, name in enumerate(nicknames):\n col_name = \"match_first_name_{}\".format(i)\n select = \" lower('{}') as {} \".format(name, col_name)\n first_name_selects.append(select)\n edit_distance = \"\"\"\n (levenshtein(lower(first_name), {col}) <= {tolerance}\n OR levenshtein(lower(nickname), {col}) <= {tolerance})\n \"\"\".format(col=col_name, tolerance=tolerance)\n first_name_conditions.append(edit_distance)\n name_select = \", \".join(first_name_selects)\n name_conditions = \" OR \".join(first_name_conditions)\n return name_select, name_conditions",
"def _match_short_names(self, token_set_one, token_set_two):\n copy_set_one = token_set_one.copy()\n copy_set_two = token_set_two.copy()\n matching_dict = {}\n\n\n for token in token_set_one:\n res = self.dotted_name_re.search(token)\n if res:\n initials = res.group('name')\n for other_token in token_set_two:\n if other_token.startswith(initials):\n copy_set_one.remove(token)\n try:\n copy_set_two.remove(other_token)\n except KeyError:\n continue\n matching_dict[token] = other_token\n break\n else:\n return False, None, None, None\n\n return True, copy_set_one, copy_set_two, matching_dict",
"def getnames(f):\n # Assumes file is sorted with girl names first, boy names second, and the\n # most popular name at the top of each list.\n\n lineoftext = f.readline()\n girlname,sex,count = processline(lineoftext)\n\n while sex != \"M\":\n name,sex,count = processline(f.readline())\n boyname=name\n\n return girlname,boyname",
"def extract_subject_names(file_names):\n return file_names.apply(lambda name: name.split('_')[1])",
"def countByName(lastName, firstName, filename):\r\n\r\n nameCounter = 1 #This variable serves as a counter and it ranges from 0 to 5, which accounts to the line numbers.\r\n isCorrectName = False #This variable evaluates whether the names compare to the names on the text.\r\n gmedals = 0 #Counts the amount of gold medals\r\n smedals = 0 #Counts the amount of silver medals\r\n bmedals = 0 #Counts the amount of bronze medals\r\n\r\n with open(filename, 'r', encoding='utf-8') as file:\r\n for line in file:\r\n line = line.strip().upper()\r\n if nameCounter == 1:\r\n if line == lastName.upper():\r\n isCorrectName = True\r\n else:\r\n isCorrectName = False\r\n if nameCounter == 2 and isCorrectName is True:\r\n if line == firstName.upper():\r\n isCorrectName = True\r\n else:\r\n isCorrectName = False\r\n if nameCounter == 4:\r\n if isCorrectName is True and line == '1':\r\n gmedals += 1\r\n else:\r\n pass\r\n if isCorrectName is True and line == '2':\r\n smedals += 1\r\n else:\r\n pass\r\n if isCorrectName is True and line == '3':\r\n bmedals += 1\r\n\r\n if nameCounter == 5:\r\n nameCounter = 0\r\n isCorrectName = False\r\n\r\n nameCounter += 1\r\n\r\n return gmedals, smedals, bmedals",
"def find_feature_titles_in_file(feature_index, feature_names, file):\n\n dict_of_features_in_this_file = {}\n for feature_name, feature_titles in feature_names.items():\n try:\n features_found = [feature for feature in feature_titles if feature in feature_index]\n if len(features_found) == 1:\n dict_of_features_in_this_file[feature_name] = features_found[0]\n else:\n raise FeatureNotFoundError\n\n except FeatureNotFoundError:\n sys.exit(\n 'ERROR: Finding zero or more than one occurrence of feature {} in the header of input file'\n 'file {}! Please check variable feature_names in the function main().'\n 'Running the code is terminated.'.format(feature_titles, file))\n return dict_of_features_in_this_file",
"def find_match(people, STRs):\n for person in people:\n if compare_str(person, STRs):\n return person[\"name\"]\n return \"No match\"",
"def starts_with(self, matchstr, **kwargs):\r\n \r\n valid_kwargs = ['num_results', 'case_sensitive']\r\n validator.validate(kwargs.keys(), valid_kwargs)\r\n\r\n final_list = []\r\n case_sensitive = False\r\n num_results = 0\r\n \r\n if 'num_results' in kwargs:\r\n num_results = int(kwargs['num_results'])\r\n \r\n if len(matchstr) == 0:\r\n if num_results:\r\n return self.__sorted_names[0:num_results]\r\n return self.__sorted_names[:]\r\n\r\n if 'case_sensitive' in kwargs:\r\n if kwargs['case_sensitive']:\r\n case_sensitive = True\r\n\r\n tag_names_that_start_with_char = []\r\n \r\n if case_sensitive:\r\n if matchstr[0] not in self.__name_index:\r\n return []\r\n else:\r\n if matchstr[0].lower() not in self.__name_index and matchstr[0].upper() not in self.__name_index:\r\n return []\r\n \r\n if case_sensitive:\r\n idxs = self.__name_index[matchstr[0]]\r\n \r\n if idxs['first'] == idxs['last'] + 1:\r\n tag_names_that_start_with_char = self.__sorted_names[idxs['first']]\r\n else:\r\n tag_names_that_start_with_char = self.__sorted_names[idxs['first']:idxs['last'] + 1]\r\n \r\n else:\r\n if matchstr[0].lower() in self.__name_index:\r\n idxs = self.__name_index[matchstr[0].lower()]\r\n \r\n if idxs['first'] == idxs['last'] + 1:\r\n tag_names_that_start_with_char = self.__sorted_names[idxs['first']]\r\n else:\r\n tag_names_that_start_with_char = self.__sorted_names[idxs['first']:idxs['last'] + 1]\r\n\r\n if matchstr[0].upper() in self.__name_index:\r\n idxs = self.__name_index[matchstr[0].upper()]\r\n \r\n if idxs['first'] == idxs['last'] + 1:\r\n tag_names_that_start_with_char += [self.__sorted_names[idxs['first']]]\r\n else:\r\n tag_names_that_start_with_char += self.__sorted_names[idxs['first']:idxs['last'] + 1]\r\n \r\n if len(matchstr) == 1:\r\n if num_results == 0:\r\n return tag_names_that_start_with_char[:]\r\n else:\r\n return tag_names_that_start_with_char[0:num_results]\r\n \r\n if case_sensitive:\r\n for t in tag_names_that_start_with_char:\r\n if (t.find(matchstr) == 0):\r\n final_list.append(copy(t))\r\n if num_results > 0 and len(final_list) == num_results:\r\n return final_list\r\n else:\r\n for t in tag_names_that_start_with_char:\r\n if (t.lower().find(matchstr.lower()) == 0):\r\n final_list.append(copy(t))\r\n if num_results > 0 and len(final_list) == num_results:\r\n return final_list\r\n\r\n return final_list",
"def check_named_entity(check):\r\n\tglobal word_buffer\r\n\tglobal temp\r\n\t\r\n\t\r\n\tif check == \"All\": \r\n\t# @return - Return Named Entities identified from the begining of the sentence except for the Named Entity at the end\r\n \r\n\t if temp == 1: \r\n \r\n\t\tnamed_entity = join_named_entity(word_buffer)\r\n\r\n\t\tword_buffer = []\r\n\t\t\r\n\t\ttemp = 0\r\n\r\n\t\treturn named_entity\r\n\telse:\r\n\t# @ return - Return Named Entity present at the end of the sentence, if available\r\n\r\n\t if len(word_buffer)>1: \r\n\t \r\n named_entity = join_named_entity(word_buffer)\r\n \r\n\t\treturn named_entity",
"def finddocname(string):\r\n for x in doclist:\r\n foundvar = f\"-->Doc name = {x.title()}\"\r\n if x in string:\r\n print(foundvar)\r\n break"
] |
[
"0.6360774",
"0.6280207",
"0.6206798",
"0.6189551",
"0.6042509",
"0.5802803",
"0.5793913",
"0.5733172",
"0.57170224",
"0.5716283",
"0.56726676",
"0.56491053",
"0.563351",
"0.5597997",
"0.5563706",
"0.5563537",
"0.55441076",
"0.55139315",
"0.5505184",
"0.5496029",
"0.54784536",
"0.54717904",
"0.5466944",
"0.54575783",
"0.5425145",
"0.5419574",
"0.5407387",
"0.538537",
"0.5381204",
"0.536705"
] |
0.6673137
|
0
|
Find names position in a sentence based on a FIRST_NAMES file
|
import re

def find_names_position(sentence=None, last_names_enabled=True, no_names_enabled=False):
    if not sentence:
        raise Exception(ParameterMissing, "This method requires sentence as input")
    if not isinstance(sentence, str):
        raise Exception(TypeError, "This method requires string as input")
    names_found = find_names(sentence, last_names_enabled=last_names_enabled, no_names_enabled=no_names_enabled)
    to_return = []
    for name in names_found:
        # find_names returns title-cased names, so match case-insensitively and escape
        # the name in case it contains regex metacharacters
        begin_positions = [m.start() for m in re.finditer(re.escape(name), sentence, re.IGNORECASE)]
        for begin in begin_positions:
            to_return.append((begin, begin + len(name)))
    return to_return
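
As a quick illustration (a sketch only: find_names is stubbed here with a tiny hard-coded first-name list, standing in for the project's FIRST_NAMES lookup, so the function can be run in isolation):

def find_names(sentence, last_names_enabled=True, no_names_enabled=False):
    # minimal stand-in: match lowercased words against a toy first-name list
    first_names = {"alice", "bob"}
    words = re.findall(r"[a-z]+", sentence.lower())
    return [w.title() for w in words if w in first_names]

print(find_names_position("Alice met Bob near the station."))
# [(0, 5), (10, 13)]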
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_names(lines): \n next = False \n names = []\n for line in lines:\n if next:\n if len(line) == 1:\n break\n else:\n tmp = line.split()\n names.append(tmp[1])\n if line.startswith('Sequences loaded ...'):\n next = True\n return names",
"def find_names(s):\n \"*** YOUR CODE HERE ***\"",
"def fetch_candidate_name(self):\r\n # variable to save possible matches\r\n possible_names = []\r\n\r\n # source text is input document in text format\r\n nlp_text = self.doc # := nlp(self.stringtext)\r\n\r\n # Add patterns to match proper names\r\n patterns = [[{'POS': 'PROPN'}]]\r\n self.matcher.add('NAME', patterns) \r\n matches = self.matcher(nlp_text) \r\n\r\n # fetch the matches\r\n for match_id, start, end in matches:\r\n span = nlp_text[start:end] \r\n possible_names += [span.text] \r\n if len(possible_names) >= 2: \r\n break\r\n\r\n # Extract candidates\r\n doc_entities = self.doc.ents\r\n\r\n # Subset to person type entities\r\n doc_persons = filter(lambda x: x.label_ == 'PERSON', doc_entities)\r\n doc_persons = filter(lambda x: len(\r\n x.text.strip().split()) >= 2, doc_persons)\r\n doc_persons = map(lambda x: x.text.strip(), doc_persons)\r\n doc_persons = list(doc_persons)\r\n\r\n # Assume the first Person entity with more than two tokens is the candidate's name\r\n if len(doc_persons) > 0:\r\n return possible_names + [doc_persons[0]]\r\n\r\n return \"NOT FOUND\"",
"def find_names(text):\n\n names = []\n\n # spacy doc\n doc = nlp(text)\n\n # pattern\n pattern = [{'LOWER': 'prime'},\n {'LOWER': 'minister'},\n {'POS': 'ADP', 'OP': '?'},\n {'POS': 'PROPN'}]\n\n # Matcher class object\n matcher = Matcher(nlp.vocab)\n matcher.add(\"names\", None, pattern)\n\n matches = matcher(doc)\n\n # finding patterns in the text\n\n for i in range(0, len(matches)):\n\n # match: id, start, end\n token = doc[matches[i][1]:matches[i][2]]\n # append token to list\n names.append(str(token))\n\n # Only keep sentences containing Indian PMs\n\n for name in names:\n if (name.split()[2] == 'of') and (name.split()[3] != \"India\"):\n names.remove(name)\n\n return names",
"def find_names(sentence=None, last_names_enabled=True, no_names_enabled=False):\n if not sentence:\n raise Exception(ParameterMissing, \"This method requires sentence as input\")\n\n if not isinstance(sentence, str):\n raise Exception(TypeError, \"This method requires string as input\")\n\n first_names = get_first_names_pack()\n if not first_names:\n raise Exception(VariableNotSet, \"Variable FIRST_NAMES is not set in settings.py\")\n\n if last_names_enabled:\n last_names = get_last_names_pack()\n if not last_names:\n raise Exception(VariableNotSet, \"Variable LAST_NAMES is not set in settings.py\")\n first_names = list(set(first_names).union(set(last_names)))\n\n if no_names_enabled:\n no_names = get_no_names_pack()\n if not no_names:\n raise Exception(VariableNotSet, \"Variable NO_NAMES is not set in settings.py\")\n first_names = list(set(first_names).difference(set(no_names)))\n\n punctuation = '!@#$%^&*()_+<>?:.,;'\n\n for c in sentence:\n if c in punctuation:\n sentence = sentence.replace(c, \" \")\n\n words = sentence.lower().split()\n res = set(words).intersection(first_names)\n\n to_return = [w.title() for w in res]\n\n return to_return",
"def test_word_positions_in_file(self):\n pass",
"def countByName(lastName, firstName, filename):\r\n\r\n nameCounter = 1 #This variable serves as a counter and it ranges from 0 to 5, which accounts to the line numbers.\r\n isCorrectName = False #This variable evaluates whether the names compare to the names on the text.\r\n gmedals = 0 #Counts the amount of gold medals\r\n smedals = 0 #Counts the amount of silver medals\r\n bmedals = 0 #Counts the amount of bronze medals\r\n\r\n with open(filename, 'r', encoding='utf-8') as file:\r\n for line in file:\r\n line = line.strip().upper()\r\n if nameCounter == 1:\r\n if line == lastName.upper():\r\n isCorrectName = True\r\n else:\r\n isCorrectName = False\r\n if nameCounter == 2 and isCorrectName is True:\r\n if line == firstName.upper():\r\n isCorrectName = True\r\n else:\r\n isCorrectName = False\r\n if nameCounter == 4:\r\n if isCorrectName is True and line == '1':\r\n gmedals += 1\r\n else:\r\n pass\r\n if isCorrectName is True and line == '2':\r\n smedals += 1\r\n else:\r\n pass\r\n if isCorrectName is True and line == '3':\r\n bmedals += 1\r\n\r\n if nameCounter == 5:\r\n nameCounter = 0\r\n isCorrectName = False\r\n\r\n nameCounter += 1\r\n\r\n return gmedals, smedals, bmedals",
"def index_of(self, last_name, first_name):\n self.is_at_with_exception()\n self.refresh_table()\n i = 0\n for item in self._table['first_name_column']:\n if item.text == first_name:\n if self._table['last_name_column'][i].text == last_name:\n return i\n else:\n i = i + 1\n return -1",
"def MatchProtNames(ProteomeDict, MS_names, MS_seqs):\n matchedNames, seqs, Xidx = [], [], []\n counter = 0\n for i, MS_seq in enumerate(MS_seqs):\n MS_seqU = MS_seq.upper()\n MS_name = MS_names[i].strip()\n if MS_name in ProteomeDict and MS_seqU in ProteomeDict[MS_name]:\n Xidx.append(i)\n seqs.append(MS_seq)\n matchedNames.append(MS_name)\n else:\n try:\n newname = getKeysByValue(ProteomeDict, MS_seqU)[0]\n assert MS_seqU in ProteomeDict[newname]\n Xidx.append(i)\n seqs.append(MS_seq)\n matchedNames.append(newname)\n except BaseException:\n print(MS_name, MS_seqU)\n counter += 1\n continue\n\n assert counter == 0, \"Proteome is missing %s peptides\" % (counter)\n assert len(matchedNames) == len(seqs)\n return matchedNames, seqs, Xidx",
"def shortest_first_name(names):\n names = dedup_and_title_case_names(names)\n # ...",
"def find_word(self,word):\r\n self.start_pos = []\r\n #check each row\r\n for i in range(0,len(self.wordsearch)):\r\n #check each column\r\n for j in range(0, len(self.wordsearch[i])):\r\n #find all coordinates which have the first letter of the word and store them\r\n if self.wordsearch[i][j] == self.word[0]:\r\n self.start_pos.append([i,j])\r\n \r\n \r\n #print(count)\r\n for pos in self.start_pos:\r\n if self.check_start(self.word, pos):\r\n \r\n return",
"def _first_name_sql(self, first_name, tolerance=1):\n nicknames = self._lookup_name(first_name)\n first_name_selects = []\n first_name_conditions = []\n for i, name in enumerate(nicknames):\n col_name = \"match_first_name_{}\".format(i)\n select = \" lower('{}') as {} \".format(name, col_name)\n first_name_selects.append(select)\n edit_distance = \"\"\"\n (levenshtein(lower(first_name), {col}) <= {tolerance}\n OR levenshtein(lower(nickname), {col}) <= {tolerance})\n \"\"\".format(col=col_name, tolerance=tolerance)\n first_name_conditions.append(edit_distance)\n name_select = \", \".join(first_name_selects)\n name_conditions = \" OR \".join(first_name_conditions)\n return name_select, name_conditions",
"def name_extractor(file):\n \n import os\n import re\n \n name_list = []\n rank_dict = {}\n \n year = re.search(r'(\\d+)\\.html$', file) \n current = open(file) \n match = re.findall(r'<tr\\salign=\"right\"><td>(\\d+).*?>(\\w+).*?>(\\w+)', current.read())\n current.close\n\n \n for one_touple in match: #Check for existing match, only accept lower rank value into dictionary\n \n for index in range(1,2):\n \n if one_touple[index] in rank_dict:\n if rank_dict[one_touple[index]] < one_touple[0]:\n continue\n rank_dict[one_touple[index]] = one_touple[0]\n \n for one_item in rank_dict:\n \n ranking = rank_dict[one_item] #Build target list from dictionary formatted as \"Name rank\"\n name_list.append(f\"{one_item} {ranking}\") \n \n name_list = sorted(name_list)\n name_list.insert(0,year.group(1))\n \n return name_list",
"def process_name(name):\n def getnames_form3(a):\n \"\"\"\n Case with two commas: the name is of the format\n von Last, Jr, First\n like in: von Hicks, III, Michael\n \"\"\"\n full_last = a[0].strip()\n full_first = a[2].strip()\n junior = a[1].strip()\n von, last = get_vonlast(full_last)\n return [von.strip(), last.strip(), full_first.strip(), junior.strip()]\n\n def getnames_form2(a):\n \"\"\"\n Case with one comma: the name is of the format\n von Last, First\n like in: von Hicks, Michael\n \"\"\"\n full_last = a[0].strip()\n full_first = a[1].strip()\n junior = ''\n von, last = get_vonlast(full_last)\n return [von.strip(), last.strip(), full_first.strip(), junior]\n\n def getnames_form1(a):\n \"\"\"\n Case with NO commas: the name is of the format\n First von Last\n like in: Michael von Hicks\n \"\"\"\n last = a[0].split(' ')\n nfn = 0\n for l in last:\n if l != \"\" and not l[0].islower():\n nfn += 1\n else:\n break\n if nfn == len(last):\n nfn = -1\n\n full_first = ' '.join(last[:nfn])\n full_first = full_first.replace('.', ' ')\n full_last = ' '.join(last[nfn:])\n junior = \" \"\n von, last = get_vonlast(full_last)\n return [von.strip(), last.strip(), full_first.strip(), junior.strip()]\n\n def get_vonlast(full_last):\n von = \"\"\n last = \"\"\n\n for l in full_last.split(' '):\n if len(l) > 0 and l[0].islower():\n von += l.lower() + \" \"\n else:\n last += l + \" \"\n return von, last\n\n # Start the processing\n a = name.split(',')\n if len(a) == 3:\n fullname = getnames_form3(a)\n elif len(a) == 2:\n fullname = getnames_form2(a)\n elif len(a) == 1:\n fullname = getnames_form1(a)\n else:\n fullname = []\n\n return fullname",
"def extract_names(filename):\n # +++your code here+++\n # Opening the file\n f = open(filename, 'rU')\n # Reading all of the lines\n lines = f.readlines()\n # Empty list to hold the year, names, and ranks\n ranks_names = []\n for line in lines:\n # search for the year\n year = re.search(r'\\s(\\d\\d\\d\\d)</h3>', line)\n # if the year is found, append it to the list\n if year: \n ranks_names.append(year.group(1))\n # search for the rank, male name, and female name\n rank_male_female = re.search(r'(\\d+)</td><td>(\\w+)</td><td>(\\w+)</td>', line)\n # If they are found then append the male name plus its rank, as well as the \n # female name plus its rank\n if rank_male_female:\n ranks_names.append(rank_male_female.group(2) + ' ' + rank_male_female.group(1))\n ranks_names.append(rank_male_female.group(3) + ' ' + rank_male_female.group(1))\n # Sort the list alphabetically\n ranks_names.sort()\n # Return the list\n return ranks_names",
"def test_first_name_good_values(self):\n for input_val, output_val in self.known_values:\n self.line._parse_first_name(input_val)\n self.assertEqual(output_val, self.line.first_name)",
"def test_find_first_author_initial(self):\n inv_search = 'firstauthor:\"ellis, j*\"'\n spi_search = 'find fa j ellis'\n self._compare_searches(inv_search, spi_search)",
"def fix_seqname(sname):\r\n # protid is on each line of the FASTA file; splitting doesn't really do anything\r\n # protid = sname.split(' ')\r\n # TK 2020-07-22\r\n # Dictionary for filenames so that we know which CDS file to query for each\r\n # protein ID.\r\n lookups = {\r\n 'AET' : 'Aegilops_tauschii.Aet_v4.0.cds.all.fa',\r\n\t'PNS' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'PNT' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'KQJ' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'KQK' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'Dr' : 'Dioscorea_rotundata.TDr96_F1_Pseudo_Chromosome_v1.0.cds.all.fa',\r\n\t'Et' : 'Eragrostis_tef.ASM97063v1.cds.all.fa',\r\n\t'HORVU' : 'Hordeum_vulgare.IBSC_v2.cds.all.fa',\r\n\t'LPERR' : 'Leersia_perrieri.Lperr_V1.4.cds.all.fa',\r\n\t'GSMUA' : 'Musa_acuminata.ASM31385v1.cds.all.fa',\r\n\t'OBART' : 'Oryza_barthii.O.barthii_v1.cds.all.fa',\r\n\t'ORGLA' : 'Oryza_glaberrima.Oryza_glaberrima_V1.cds.all.fa',\r\n\t'ONIVA': 'Oryza_nivara.Oryza_nivara_v1.0.cds.all.fa',\r\n\t'ORUFI' : 'Oryza_rufipogon.OR_W1943.cds.all.fa',\r\n\t'PVH' : 'Panicum_hallii_fil2.PHallii_v3.1.cds.all.fa',\r\n\t'Sspon' : 'Saccharum_spontaneum.Sspon.HiC_chr_asm.cds.all.fa',\r\n\t'KQL' : 'Setaria_italica.Setaria_italica_v2.0.cds.all.fa',\r\n\t'TraesCS' : 'Triticum_aestivum.IWGSC.cds.all.fa',\r\n\t'Zm' : 'Zea_mays.B73_RefGen_v4.cds.all.fa',\r\n\t'Zlat': 'Zlat_V1.cds.fa',\r\n 'FUN': 'rice.transcripts.fa',\r\n 'Os': 'Oryza_sativa.IRGSP-1.0.cds.all.fa'\r\n }\r\n # Get the filename based on what the sequence starts with.\r\n for id_start, cds_file in lookups.items():\r\n if sname.startswith(id_start):\r\n target_file = cds_file\r\n break\r\n # Return the protein name and CDS target file as a tuple\r\n return (target_file, sname)\r\n\r\n # Make a lookup table to get the species name based on the protein ID.\r\n # lookups = [('Zlat*','Zizania_latifolia'),('FUN*','Zizania_palustris'),('Os*','Oryza_sativa')]\r\n # Initialize an empty species dictionary to assist in connecting protid (gene name) to species name\r\n # species_dict = {}\r\n # # This for loop will populate the species dictionary so that we can get species name keyed on the protid (gene name)\r\n # for i in protid:\r\n # species = lookup(i, lookups)\r\n # return species.encode, i\r\n # species_dict[protid] = species.encode()\r\n # return None\r",
"def find_feature_titles_in_file(feature_index, feature_names, file):\n\n dict_of_features_in_this_file = {}\n for feature_name, feature_titles in feature_names.items():\n try:\n features_found = [feature for feature in feature_titles if feature in feature_index]\n if len(features_found) == 1:\n dict_of_features_in_this_file[feature_name] = features_found[0]\n else:\n raise FeatureNotFoundError\n\n except FeatureNotFoundError:\n sys.exit(\n 'ERROR: Finding zero or more than one occurrence of feature {} in the header of input file'\n 'file {}! Please check variable feature_names in the function main().'\n 'Running the code is terminated.'.format(feature_titles, file))\n return dict_of_features_in_this_file",
"def process_names():\n with open(input_names_file, 'r') as data:\n plaintext = data.read()\n name_array = plaintext.split('\\n')\n\n # Final name list\n final_name_list = []\n\n # Parsing different name formats and standardizing to create csv\n for name in name_array:\n if len(name.split(',')) == 2:\n temp_name_list = re.split(reg_ex, name)\n last_name = temp_name_list.pop()\n first_name = temp_name_list.pop()\n final_name_list.append(last_name + ',' + first_name)\n elif len(name.split(' ')) == 2:\n final_name_list.append(name.replace(' ', ','))\n elif len(name.split(' ')) == 3:\n temp_name_list = re.split(' ', name)\n last_name = temp_name_list.pop()\n middle_name = temp_name_list.pop()\n first_name = temp_name_list.pop()\n final_name_list.append(first_name + ',' + middle_name + ' ' + last_name)\n else:\n final_name_list.append(name)\n\n # Writing final name list to a file\n with open(output_names_file, \"w\") as txt_file:\n txt_file.write(\"first_name,last_name\" + \"\\n\")\n for name in final_name_list:\n txt_file.write(name + \"\\n\") # works with any number of elements in a line\n\n names_df = pd.read_csv(output_names_file, names=name_header, sep=',', engine='python')",
"def shortest_first_name(names):\n names = dedup_and_title_case_names(names)\n split_names = [name.split(' ') for name in names]\n first_name = [first for first, last in split_names]\n shortest = first_name[0]\n for name in first_name:\n if len(name) < len(shortest):\n shortest = name\n\n return shortest",
"def shortest_first_name(names):\n names = dedup_and_title_case_names(names)\n return sorted([name.split()[0] for name in names], key=len)[0]",
"def count_name(text, adj):\n for x in re.finditer(r'[A-Z][a-z]*[\\s][A-Z][a-z]*',text):\n adj[x.group()] += 1\n return",
"def _get_header_position(header_row: List[str], column_title: str) -> int:\n for pos, column in enumerate(header_row):\n if column_title.lower() in column.lower():\n return pos\n\n raise Exception(\"Expected column header not found for {}\".format(column_title))",
"def find_match(second_file, title):\r\n # Initialize variables/ open files\r\n seq2 = \"\"\r\n header2 = \"\"\r\n match_fh = open(second_file, \"r\")\r\n # parse through lines of file\r\n for lines in match_fh:\r\n # If > found assume its header\r\n if lines[0] == \">\":\r\n # header2 = lines\r\n # If a header has been found, pull strain name, orgainism and subtype for new header\r\n if len(header2) > 0:\r\n matches2 = re.findall(\"(Strain Name:[AB]\\/[\\/A-Za-z 0-9()\\\\-_'.]+)\", header2)\r\n subtype_match2 = re.findall(\"(Subtype:[A-Za-z0-9]+)\", header2)\r\n organ2 = re.findall(\"(Organism:[\\/A-Za-z 0-9()\\\\-_'.]+)\", header2)\r\n header2 = \">\" + organ2[0] + \"|\" + matches2[0] + \"|\" + subtype_match2[0]\r\n # if new header equals input header then return it and the sequence\r\n if header2 == title:\r\n match_fh.close()\r\n print(\"match\")\r\n return header2, seq2\r\n # Reset the header and seq\r\n header2 = lines\r\n seq2 = \"\"\r\n\r\n else:\r\n # if it is part of the sequence\r\n seq2 = seq2 + lines\r\n\r\n # to return the last entry in the file, since loop won't be able to return it\r\n matches2 = re.findall(\"(Strain Name:[AB]\\/[\\/A-Za-z 0-9()\\\\-_'.]+)\", header2)\r\n subtype_match2 = re.findall(\"(Subtype:[A-Za-z0-9]+)\", header2)\r\n organ2 = re.findall(\"(Organism:[\\/A-Za-z 0-9()\\\\-_'.]+)\", header2)\r\n header2 = \">\" + organ2[0] + \"|\" + matches2[0] + \"|\" + subtype_match2[0]\r\n match_fh.close()\r\n return header2, seq2",
"def getnames(f):\n # Assumes file is sorted with girl names first, boy names second, and the\n # most popular name at the top of each list.\n\n lineoftext = f.readline()\n girlname,sex,count = processline(lineoftext)\n\n while sex != \"M\":\n name,sex,count = processline(f.readline())\n boyname=name\n\n return girlname,boyname",
"def __get_names(self): \n names_str = self.names_text.get(1.0, END)\n names = names_str.splitlines()\n return names",
"def getPosition(fname, pos):\n count = 0\n infile = open(fname, \"r\")\n n_line = infile.readline()\n temp = []\n for line in infile:\n words = line.split(',')\n temp.append(Player(words[0], int(words[1]), int(words[2]), int(words[3]),\n int(words[4]), float(words[5]), pos, 1))\n count += 1\n infile.close()\n return temp, count",
"def match_name(sentence):\n if \"WIFE\" in sentence:\n return \"WIFE\"\n elif \"MAHAVIR\" in sentence or \"FATHER\" in sentence or \"SINGH\" in sentence: \n return \"MAHAVIR\"\n elif \"TEENAGER\" in sentence:\n return \"TEENAGER\"\n elif \"GIRL\" in sentence or \"WOMAN\" in sentence: \n return \"WOMAN\"\n elif \"GUY\" in sentence or \"MAN\" in sentence or \"BROTHER\" in sentence: \n return \"MAN\"\n elif \"COACH\" in sentence:\n return \"COACH\"\n elif \"COMMENT\" in sentence:\n return \"COMMENTATOR\"\n elif sentence[-2:] == \"ER\" or sentence[-3:] == \"IAN\" or sentence[-2:] == \"OR\" or sentence[-1:] == \"D\":\n return \"MISC\"\n \n return sentence",
"def _get_names(self):\n if len(self.firstnames):\n return self.firstnames, self.lastnames\n\n if os.path.exists(\"/code/api/app/utils/names.txt\"):\n with open(\"/code/api/app/utils/names.txt\") as file_with_names:\n names = file_with_names.readlines()\n else:\n # why yes, these are names of African Hollywood actors (according to Wikipedia)\n names = [\"Mehcad Brooks\", \"Malcolm Barrett\", \"Nick Cannon\", \"Lamorne Morris\", \"Neil Brown Jr.\",\n \"William Jackson Harper\", \"Marques Houston\", \"Jennifer Hudson\", \"Alicia Keys\", \"Meghan Markle\",\n \"Beyonce Knowles\", \"Jesse Williams\", \"Lance Gross\", \"Hosea Chanchez\", \"Daveed Diggs\",\n \"Damon Wayans Jr.\", \"Columbus Short\", \"Terrence Jenkins\", \"Ron Funches\", \"Jussie Smollett\",\n \"Donald Glover\", \"Brian Tyree Henry\", \"Gabourey Sidibe\", \"Trai Byers\", \"Robert Ri'chard\",\n \"Arjay Smith\", \"Tessa Thompson\", \"J.Lee\", \"Lauren London\", \"DeVaughn Nixon\", \"Rob Brown\", ]\n for _name in names:\n split_name = _name.strip().split(\" \")\n self.firstnames.append(split_name[0])\n lastname = \" \".join(split_name[1:]) if len(split_name) > 1 else \"\"\n self.lastnames.append(lastname)\n return self.firstnames, self.lastnames"
] |
[
"0.5892083",
"0.58443296",
"0.5784747",
"0.57840866",
"0.5768939",
"0.5741232",
"0.5701107",
"0.5652323",
"0.54787356",
"0.5471315",
"0.53691196",
"0.5327336",
"0.53244394",
"0.5305583",
"0.5301656",
"0.5268954",
"0.5267728",
"0.5263014",
"0.5252052",
"0.5240446",
"0.5232655",
"0.5205766",
"0.5199973",
"0.5193679",
"0.51889163",
"0.517899",
"0.51580447",
"0.5155355",
"0.515433",
"0.512415"
] |
0.70489156
|
0
|
Display messages based on the window
|
def displayMessages(window, messages=['']):
    # join the messages into one newline-separated string and push it to the 'messages' element
    message_in_line = ''
    for msg in messages:
        message_in_line += '\n' + msg
    window['messages'].update(message_in_line)
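
A minimal sketch of how this could be wired up, assuming PySimpleGUI and an element keyed 'messages' (the key comes from the function above; the Multiline element and the rest of the layout are illustrative):

import PySimpleGUI as sg

layout = [[sg.Multiline('', key='messages', size=(40, 6))]]
window = sg.Window('Messages', layout, finalize=True)

displayMessages(window, ['first message', 'second message'])

window.read()
window.close()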
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def showMessage(self):",
"def display_message(self, message):\n if self.web_crawler_window is None and self.webpage_classifier_window is None:\n self.machine_learner_window.display_message(message)\n elif self.web_crawler_window is None and self.machine_learner_window is None:\n self.webpage_classifier_window.display_message(message)\n elif self.webpage_classifier_window is None and self.machine_learner_window is None:\n self.web_crawler_window.display_message(message)",
"def display_messages(self, layout):",
"def display_message(window, msg):\n v = create_output_panel(window, '')\n _append(v, msg)",
"def display_message():",
"def show_messages(self):\n console.alert(\n \"Info\",\n \"If StaSh does not launch anymore after you changed the config, run the 'launch_stash.py' script with \\n'--no-cfgfile'.\",\n \"Ok\",\n hide_cancel_button=True,\n )\n while True:\n self.wait_modal()\n if not self.subview_open:\n break\n console.alert(\n \"Info\",\n \"Some changes may only be visible after restarting StaSh and/or Pythonista.\",\n \"Ok\",\n hide_cancel_button=True,\n )",
"def show(self, window):\r\n\r\n return",
"def show_messages(self):\n self.masterlog.revealme()",
"def show_messages(self):\n for msg in self.messages:\n print msg['text']",
"def showinfo(self, msg):\n tkinter.messagebox.showinfo('Information', msg)",
"def __window_alert(self, text):\n print str(text)\n config.VERBOSE(config.VERBOSE_DEBUG, '[DEBUG] alertmsg: ' + str(text))",
"def print2message():\n return OverrideManager(\"Output Window\")",
"def event_loop(self):\n if self.message_counter:\n if not self.msg:\n self.showdialog()\n else:\n self.msg.setText(\n \"COMET encounterd {} error(s)\".format(self.message_counter).ljust(\n 70\n )\n )",
"def show(self):\n self.present(orientations=ORIENTATIONS)\n # launch a background thread\n # we can not use ui.in_background here\n # because some dialogs would not open anymoe\n thr = threading.Thread(target=self.show_messages)\n thr.daemon = True\n thr.start()",
"def doMessageWindow(msg):\n _loadMsgSettings()\n if settings.has_key(msg):\n return\n global dialog\n dialog = QtGui.QDialog()\n msgDialog = ui.message.Ui_Dialog()\n msgDialog.setupUi(dialog)\n msgDialog.messageLabel.setText(msg)\n dialog.exec_()\n if msgDialog.showAgainCheckBox.isChecked():\n settings[msg] = True\n _saveMsgSettings()",
"def show_msgdialog(self):\n log_msg = log.getBufferAsString()\n if not log_msg:\n return\n\n # initialise message dialog\n msg_dialog = msgdialog.MessageDialog(None, -1, \"\")\n msg_dialog.msg_list.InsertColumn(0, \"\")\n\n # clear dialog and show new messages\n msg_dialog.msg_list.Freeze()\n msg_dialog.msg_list.DeleteAllItems()\n for line in log_msg.split('\\n'):\n msg_dialog.msg_list.Append([line, ])\n msg_dialog.msg_list.SetColumnWidth(0, -1)\n msg_dialog.msg_list.Thaw()\n msg_dialog.ShowModal()\n msg_dialog.Destroy()",
"def modeMsgBox(self, messageText):\n self.createMessage(messageText)",
"def msg_show(self,msg):\r\n self.frame.Show()\r\n self.frame.Raise()",
"def display_abort_msg(self):\r\n \r\n labelfont = ('times', 20, 'bold') \r\n msg_window = Toplevel(self.root) # Child of root window \r\n msg_window.geometry(\"650x180+300+300\") # Size of window, plus x and y placement offsets \r\n msg_window.title(\"Error Message\")\r\n msg_window.config(bg='red')\r\n msg_window.config(borderwidth=5)\r\n msg_window.config(relief=\"sunken\")\r\n self.msgStr = StringVar()\r\n self.msgStr.set(\" Session was ABORTED \\r due to an unrecoverable input or output error \")\r\n\r\n label1 = ttk.Label(msg_window,textvariable = self.msgStr, background=\"White\",foreground=\"Red\")\r\n #option must be -column, -columnspan, -in, -ipadx, -ipady, -padx, -pady, -row, -rowspan, or -sticky\r\n label1.config(font=labelfont) \r\n label1.grid(row=1,column=1, padx = 20, pady = 20, sticky='nesw')\r\n\r\n button1 = ttk.Button(msg_window, text=' OK ',command = msg_window.destroy)\r\n button1.grid(row=2,column=1, padx=20, pady=10)",
"def MessageWindow(screen, title, text, width=40, help=None, timer_ms=None, \n run_type=RT_EXECUTEANDPOP):\n \n g = GridFormHelp(screen, title, help, 1, 3)\n\n t = TextboxReflowed(width, text)\n g.add(t, 0, 0)\n\n if timer_ms:\n g.form.w.settimer(timer_ms)\n\n (button, is_esc) = ActivateWindow(g, run_type)\n\n return {'is_esc': is_esc, \n 'grid': g,\n }",
"def message_box(self):\n root = tk.Toplevel(self.top)\n root.attributes('-topmost', True)\n root.geometry(\"+650+100\")\n root.withdraw()\n messagebox.showinfo('Oh oh', 'Wrong message. Try again!')\n try:\n root.destroy()\n except:\n pass",
"def msg_about(self):\n self.window.withdraw()\n msg.showinfo(\"About Text Reader\",\n \"A Python GUI created to convert text from files to speech and describe the text in 5 most \"\n \"popular words.\")\n self.window.deiconify()",
"def message_box(subject, content):\r\n root = tk.Tk()\r\n root.attributes(\"-topmost\", True)\r\n root.withdraw()\r\n messagebox.showinfo(subject, content)\r\n try:\r\n root.destroy()\r\n except:\r\n pass",
"def show_message(message, col=c.r, update=False):\n g.content = generate_songlist_display()\n g.message = col + message + c.w\n\n if update:\n screen_update()",
"def game_win(self):\n self.win = True\n self.msg.set_text(u'YOU WIN <Press Space>')\n self.msg.show(True)",
"def msg_window(text):\n msg = QMessageBox()\n msg.setIcon(QMessageBox.Information)\n msg.setText(text)\n msg.setWindowTitle(\"Info\")\n msg.exec_()",
"def showMessage(self, msg):\r\n super(SplashScreen, self).showMessage(\r\n msg, self.labelAlignment, QColor(_QtCore.Qt.white))\r\n QApplication.processEvents()",
"def showMessage(self, message):\r\n print message",
"def open_main_window(self):\r\n track_terms_dic = ''\r\n sg.theme(self.look)\r\n\r\n layout = [[sg.Text('Welcome to tweeet monitor ')],\r\n [sg.Text('Please enter Details ')],\r\n [sg.Text('User Mail', size=(15, 1)), sg.InputText()],\r\n [sg.Text('Timout', size=(15, 1)), sg.InputText('', enable_events=True, key='-DIGITS-')],\r\n [sg.Text('')],\r\n [sg.Text('You can select an existing list or create a new one '),\r\n sg.Combo(self.files, default_value='Select Track Terms List ', key='-COMBO1-')],\r\n [sg.Text('')],\r\n [sg.Button('Select Exists List'), sg.Button('Create a New List')],\r\n [sg.Text('\\n')],\r\n [sg.Button('Start Monitor'), sg.Button('Exit')]\r\n ]\r\n\r\n window = sg.Window('Monitor tweeter', layout)\r\n # Event Loop\r\n while True:\r\n event, values = window.read()\r\n\r\n if event == sg.WIN_CLOSED:\r\n exit()\r\n elif event == 'Select Exists List' or event == 'Create a New List' or event == 'Start Monitor':\r\n user_mail = values[0]\r\n timeout = values['-DIGITS-']\r\n list_dic = values['-COMBO1-']\r\n\r\n if self.check(user_mail) == 'Invalid Email':\r\n self.info_popup_window('You Enter not valid mail ', 'Info', self.look)\r\n elif event == 'Select Exists List':\r\n if list_dic == 'Select Track Terms List ':\r\n self.info_popup_window('Track Terms List ', 'Info', self.look)\r\n else:\r\n file_name = self.path + self.bachslash + list_dic\r\n os.system(file_name)\r\n track_terms_dic = list_dic\r\n elif event == 'Create a New List':\r\n track_terms_dic = self.open_window()\r\n track_terms_dic = track_terms_dic + '.txt'\r\n elif event == 'Start Monitor':\r\n if track_terms_dic == '':\r\n self.info_popup_window('Please, Create new Dictionary or select one ', 'Info', self.look)\r\n elif track_terms_dic != '':\r\n file_name = self.path + self.bachslash + track_terms_dic\r\n my_file = open(file_name, \"r\")\r\n content = my_file.read()\r\n content = content.split(\"\\n\")\r\n content = self.cleanList(content)\r\n # print(content)\r\n my_file.close()\r\n now = datetime.now()\r\n date_time = now.strftime(\"%m/%d/%Y, %H:%M:%S\")\r\n dict_list = {'User': user_mail,\r\n 'Timeout': timeout,\r\n 'Dictionary': list_dic,\r\n 'Create Date': date_time,\r\n 'track_terms_list': content\r\n }\r\n header = ['user_mail', 'Timeout', 'Dictionary', 'Create Date', 'list words']\r\n if os.path.isfile(self.file_track_terms_audit) == False:\r\n # check if the file exsist = if not: create file and print header to the file\r\n with open(self.file_track_terms_audit, 'a', newline='\\n') as file:\r\n try:\r\n write = csv.writer(file)\r\n write.writerow(header)\r\n write.writerows(self.values_list)\r\n file.close()\r\n except:\r\n print(\"Something went wrong when writing to the file\")\r\n else:\r\n self.values_list = list(dict_list.values())\r\n # print ('self.values_list :****',self.values_list)\r\n with open(self.file_track_terms_audit, 'a', newline='\\n') as file:\r\n try:\r\n write = csv.writer(file)\r\n self.values_list = [self.values_list]\r\n write.writerows(self.values_list)\r\n file.close()\r\n except:\r\n print(\"Something went wrong when writing to the file\")\r\n print('self.values_list:', self.values_list)\r\n\r\n window.close()\r\n\r\n print('track_terms_dic: ', track_terms_dic)\r\n print('dict_list:', dict_list)\r\n return (dict_list)\r\n\r\n # always check for closed window\r\n if event in (sg.WIN_CLOSED, 'Exit'):\r\n break\r\n\r\n if event == '-LIST-' and len(values['-LIST-']):\r\n sg.popup('Selected ', values['-LIST-'])\r\n\r\n if len(values['-DIGITS-']) and values['-DIGITS-'][-1] not in 
('0123456789'):\r\n # delete last char from input\r\n window['-DIGITS-'].update(values['-DIGITS-'][:-1])\r\n\r\n window.close()",
"def status_display(self, message, level=0, field=0):\n #print(message)\n self.statusbar_txt.set(message)"
] |
[
"0.74672174",
"0.740839",
"0.7318725",
"0.7254243",
"0.72349834",
"0.71429914",
"0.6881725",
"0.6847033",
"0.6815938",
"0.6629776",
"0.6621973",
"0.66084623",
"0.6538907",
"0.652988",
"0.6500956",
"0.6434533",
"0.64031684",
"0.6329774",
"0.63157547",
"0.630548",
"0.62997496",
"0.6296191",
"0.6293993",
"0.62727886",
"0.6271266",
"0.62703264",
"0.62658626",
"0.62625295",
"0.6250218",
"0.62329245"
] |
0.7462986
|
1
|
Normalise an array between a given range.
|
from sklearn.preprocessing import MinMaxScaler

def normalize_range(array, floor=0, ceil=1):
    # rescale each column of a 2-D array into the [floor, ceil] range
    scaler = MinMaxScaler(feature_range=(floor, ceil), copy=True)
    return scaler.fit_transform(array)
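
For example, assuming a 2-D NumPy array (MinMaxScaler scales each column independently):

import numpy as np

X = np.array([[1.0, 10.0],
              [2.0, 20.0],
              [3.0, 30.0]])
print(normalize_range(X, floor=0, ceil=1))
# each column is mapped onto [0, 1]:
# [[0.  0. ]
#  [0.5 0.5]
#  [1.  1. ]]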
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def normalize(array):\n high = array.max()\n low = array.min()\n rng = high - low\n array[:] = 1.0 - ((high - array) / rng)",
"def normalize(arr):\n m = np.min(arr)\n arr = arr - m\n M = np.max(arr)\n arr = arr / M\n return arr",
"def _normalize(array):\n\treturn (array - np.min(array))/(np.max(array)-np.min(array))",
"def normalize_data(X, range_d = None):\n n,d = X.shape\n\n if range_d is None:\n range_d = np.zeros([2,d])\n range_d[0,:] = np.min(X, axis = 0)\n range_d[1,:] = np.max(X, axis = 0)\n\n X = (X - range_d[0,:]) / (range_d[1,:] - range_d[0,:])\n\n return X",
"def normalize(X, low, high, dtype=None):\n X = np.asarray(X)\n minX, maxX = np.min(X), np.max(X)\n # normalize to [0...1].\n X = X - float(minX)\n X = X / float((maxX - minX))\n # scale to [low...high].\n X = X * (high-low)\n X = X + low\n if dtype is None:\n return np.asarray(X)\n return np.asarray(X, dtype=dtype)",
"def normalize(X, low, high, dtype=None):\n X = np.asarray(X)\n minX, maxX = np.min(X), np.max(X)\n # normalize to [0...1].\n X = X - float(minX)\n X = X / float((maxX - minX))\n # scale to [low...high].\n X = X * (high-low)\n X = X + low\n if dtype is None:\n return np.asarray(X)\n return np.asarray(X, dtype=dtype)",
"def normalize(array):\n array_min, array_max = array.min(), array.max()\n return ((array - array_min)/(array_max - array_min))",
"def normalize(A: np.array) -> np.array:\n for i in range(A.shape[1]):\n A[:, i] = (A[:, i] - np.min(A[:, i])) / (np.max(A[:, i] - np.min(A[:, i])))\n return A",
"def normalize(data, vmin=0, vmax=1):\n data = np.array(data, dtype=np.float64)\n return (vmin + (data - data.min()) * (vmax - vmin) / (data.max() - data.min())).tolist()",
"def normalize_data(data, min=0, max=1):\r\n import numpy as np\r\n assert isinstance(data, np.ndarray)\r\n\r\n max_value = np.max(data)\r\n min_value = np.min(data)\r\n\r\n scaled = np.interp(data, [min_value, max_value], [min, max])\r\n # convert to float64\r\n scaled = scaled.astype(np.float64)\r\n\r\n return scaled",
"def normalize(self, arr):\r\n\r\n\t\t#Set the cap for arr at self.value_max and self.value_max\r\n\t\t#this prevents outliers of breaking the previously predicted p_func\r\n\t\tarr_capped = arr * (arr <= self.value_max) + self.value_max * (arr > self.value_max)\t#cap to value_max\r\n\t\tarr_capped = arr_capped * (arr_capped >= self.value_min) + self.value_min * (arr_capped < self.value_min)\t#cap to value_min\r\n\r\n\t\t#Normalize array\r\n\t\tnorm_factor = self.get_norm_factor(arr_capped)\r\n\t\tnormalized = arr * norm_factor\r\n\r\n\t\treturn(normalized)",
"def normalize(arr: np.ndarray) -> np.ndarray:\n if max(arr) - min(arr) == 0:\n logger.warning(\n \"Normalize averted a div/0, the input data was:\\n {0}\".format(arr)\n )\n return np.ones(len(arr))\n return (arr - min(arr)) / (max(arr) - min(arr))",
"def normalize(array):\n\treturn array/np.max(array)",
"def normalize_array(arr, method=\"min_max\"):\r\n \r\n ret = torch.tensor(arr)\r\n if method == \"min_max\":\r\n ret -= torch.min(ret)\r\n ret /= torch.max(ret)\r\n elif method == \"mean_std\":\r\n ret -= torch.mean(ret)\r\n ret /= torch.std(ret)\r\n else:\r\n raise Exception(\"Invalid normalization method\")\r\n\r\n return 1 + ret",
"def normalise_0_1(arraylike):\n array_min = np.min(arraylike)\n array_max = np.max(arraylike)\n normalised = (arraylike - array_min) / (array_max - array_min)\n # convert to float\n normalised = np.array(normalised).astype(float)\n return normalised, array_min, array_max",
"def rescale_array(array, old_range, new_range, dtype):\n if not HAS_NUMPY:\n LOGGER.error(\"The Python library numpy is required for this operation\")\n return\n\n old_min, old_max = old_range\n if array.min() < old_min or array.max() > old_max:\n ## truncate:\n array = numpy.clip(array, old_min, old_max)\n new_min, new_max = new_range\n old_delta = float(old_max - old_min)\n new_delta = float(new_max - new_min)\n if old_delta == 0:\n return ((array - old_min) + (new_min + new_max) / 2).astype(dtype)\n else:\n return (new_min + (array - old_min) * new_delta / old_delta).astype(dtype)",
"def normalize(dataset):\n minVals = dataset.min(axis=0)\n maxVals = dataset.max(axis=0)\n factors = maxVals-minVals\n num = dataset.shape[0]\n norm_data = (dataset - np.tile(minVals,(num,1)))/np.tile(factors,(num,1)) \n return norm_data",
"def normalize(array, norm=\"l2\"):\n scaler = Normalizer(copy=True, norm=norm)\n return scaler.fit_transform(array)",
"def normalize(x):\n # TODO: Implement Function\n data_max = np.max(x)\n data_min = np.min(x)\n x = (x - data_min) / (data_max - data_min)\n return x",
"def normalize_array(cube, new_max, new_min):\n minimum, maximum = np.min(cube), np.max(cube)\n if maximum - minimum != 0:\n m = (new_max - new_min) / (maximum - minimum)\n b = new_min - m * minimum\n cube = m * cube + b\n return cube",
"def normalize(data):\n data_range = data.max() - data.min()\n #if data_range == 0.:\n # sys.exit(\"data.max() - data.min() == 0. !\")\n if stddev != 0.:\n data = (data - data.min()) / data_range\n\n return data",
"def normalise_between_2_values(arraylike, min_value, max_value, invert=False):\n # normalise array between min and max values\n normalised = (arraylike - min_value) / (max_value - min_value)\n # replace anything above 1 with 1\n normalised[normalised > 1] = 1\n # replace anything below 0 with 0\n normalised[normalised < 0] = 0\n # if desired, invert the normalised values\n if invert:\n normalised = abs(normalised - 1)\n return normalised",
"def normalize(data):\n\n\t#return [float(x) / pow(2, 15) for x in data]\n\n\tl = [float(x) / pow(2, 15) for x in data]\n\treturn np.asarray(l)",
"def normalize_minmax(data):\n _min = np.float(np.min(data))\n _max = np.float(np.max(data))\n if (_max-_min)!=0:\n img = (data - _min) / (_max-_min)\n else:\n img = np.zeros_like(data) \n return img",
"def normalizeToRange(data,max=255,min=0):\n if min: return (max-min)*normalize(data)+min\n else: return max*normalize2(data) # speeds up operation",
"def normalization(x, x_min=-5.12, x_max=5.12):\n for i in range(len(x.vect)):\n x.vect[i] = x_min + x.vect[i]*(x_max-x_min)\n return x",
"def normalized(array):\n ptp = np.ptp(array)\n if ptp == 0:\n ptp = 1\n return (array - np.min(array)) / ptp",
"def hist_normalize_linear(data, new_min, new_max):\n data_min = np.ma.min(data)\n data_max = np.ma.max(data)\n scaled = (data - data_min) * ((new_max - new_min) / (data_max - data_min))\n scaled.mask = data.mask\n return scaled",
"def normalize(arr):\n arr = arr.astype('float')\n # Do not touch the alpha channel\n for i in range(1):\n minval = arr[...,i].min()\n maxval = arr[...,i].max()\n if minval != maxval:\n arr[...,i] -= minval\n arr[...,i] *= (255.0/(maxval-minval))\n return arr",
"def normalize(data):\n min = np.min(data)\n if min:\n data = data + min\n return old_div(data,np.max(data))\n else: # if min is 0\n return old_div(data,np.max(data))"
] |
[
"0.79416615",
"0.78045434",
"0.75153875",
"0.74919647",
"0.74441695",
"0.74441695",
"0.74232936",
"0.7257156",
"0.71199733",
"0.7106798",
"0.7072435",
"0.6989358",
"0.6980073",
"0.6953394",
"0.69305813",
"0.691604",
"0.69146186",
"0.68793344",
"0.68729293",
"0.6854594",
"0.68457603",
"0.6818627",
"0.6813774",
"0.67609537",
"0.6752896",
"0.67371076",
"0.6729181",
"0.6721583",
"0.67179036",
"0.67095417"
] |
0.7987929
|
0
|
Normalise an array by its maximum absolute value. Scales and translates each feature individually such that the maximal absolute value of each feature in the array will be 1.0. It does not shift/center the data, and thus does not destroy any sparsity.
|
from sklearn.preprocessing import MaxAbsScaler

def normalize_max_absolute(array):
    # divide each column by its maximum absolute value; zeros (and hence sparsity) are preserved
    scaler = MaxAbsScaler(copy=True)
    return scaler.fit_transform(array)
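
For example, assuming a 2-D NumPy array (MaxAbsScaler works column by column):

import numpy as np

X = np.array([[ 1.0, -2.0],
              [ 2.0,  4.0],
              [-4.0,  1.0]])
print(normalize_max_absolute(X))
# each column is divided by its maximum absolute value (4.0 here):
# [[ 0.25 -0.5 ]
#  [ 0.5   1.  ]
#  [-1.    0.25]]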
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def max_normalization(array):\n return 1/np.max(array) * array.squeeze(axis=1)",
"def normalize(array):\n\treturn array/np.max(array)",
"def normalize(my_array: np.ndarray) -> np.ndarray:\n\n return np.abs(my_array)/np.max(np.abs(my_array))",
"def maxabs_scale(X, *, axis=..., copy=...):\n ...",
"def normalize(array):\n high = array.max()\n low = array.min()\n rng = high - low\n array[:] = 1.0 - ((high - array) / rng)",
"def normalize(self, arr):\r\n\r\n\t\t#Set the cap for arr at self.value_max and self.value_max\r\n\t\t#this prevents outliers of breaking the previously predicted p_func\r\n\t\tarr_capped = arr * (arr <= self.value_max) + self.value_max * (arr > self.value_max)\t#cap to value_max\r\n\t\tarr_capped = arr_capped * (arr_capped >= self.value_min) + self.value_min * (arr_capped < self.value_min)\t#cap to value_min\r\n\r\n\t\t#Normalize array\r\n\t\tnorm_factor = self.get_norm_factor(arr_capped)\r\n\t\tnormalized = arr * norm_factor\r\n\r\n\t\treturn(normalized)",
"def normalize_array(a, norm_max=255):\n c = a - np.min(a.flatten())\n c = c / np.max(c)\n centered = c * norm_max\n return centered",
"def _normalize(array):\n\treturn (array - np.min(array))/(np.max(array)-np.min(array))",
"def normalize(A: np.array) -> np.array:\n for i in range(A.shape[1]):\n A[:, i] = (A[:, i] - np.min(A[:, i])) / (np.max(A[:, i] - np.min(A[:, i])))\n return A",
"def normalise_max_abs(vector):\n\n # Check vector shape\n assert len(vector.shape) == 2\n assert vector.shape[0] < vector.shape[1]\n\n # Normalise\n for i in range(vector.shape[0]):\n maxabs = np.nanmax(np.abs(vector[i]))\n vector[i] = safe_divide(vector[i], maxabs)\n\n return vector",
"def minmax_normalize(X):\n # X -= X.min()\n # X /= X.max()\n # X -= 0.5\n X = (X-X.min()) / (X.max() - X.min())\n return X",
"def normalize(arr):\n m = np.min(arr)\n arr = arr - m\n M = np.max(arr)\n arr = arr / M\n return arr",
"def normalize_features(array):\n \n array_normalized = (array-array.mean())/array.std()\n mu = array.mean()\n sigma = array.std()\n\n return array_normalized, mu, sigma",
"def normalized(array):\n ptp = np.ptp(array)\n if ptp == 0:\n ptp = 1\n return (array - np.min(array)) / ptp",
"def normalize(array):\n array_min, array_max = array.min(), array.max()\n return ((array - array_min)/(array_max - array_min))",
"def normalize(a, new_max=1.0):\n a = (a - a.min())\n a = a/a.max()\n a *= new_max\n return a",
"def normalize(array, norm=\"l2\"):\n scaler = Normalizer(copy=True, norm=norm)\n return scaler.fit_transform(array)",
"def min_max_normalization(input_data):\n\n # Insert debugging assertions\n assert type(input_data) is np.ndarray, \"The 'input_data' must be numpy array.\"\n\n # Get the minimum and maximun values of the input numpy array along the axis \n Max = np.max(input_data, axis = 0)\n Min = np.min(input_data, axis = 0)\n\n # Min-max normalization \n normalized_input_data = (input_data - Min) / (Max - Min + sys.float_info.min)\n\n # Return normalized input data\n return normalized_input_data",
"def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)",
"def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)",
"def normalise(x):\n x = np.copy(x)\n n_cols = x.shape[1]\n for col_index in range(n_cols):\n col = x[:, col_index]\n factor = np.max(col)\n x[:, col_index] = col / factor\n\n return x",
"def mms_scale(values):\r\n mms = MinMaxScaler()\r\n return mms.fit_transform(values)",
"def minmax_normalize(samples, out=None):\n if out is None:\n dtype = np.common_type(np.empty(0, 'float32'), samples)\n out = np.array(samples, dtype=dtype, copy=True)\n else:\n out[:] = samples\n\n sample_mins = np.min(samples, -1)[..., None]\n sample_maxes = np.max(samples, -1)[..., None]\n out -= sample_mins\n out /= (sample_maxes - sample_mins)\n return out",
"def normalize(array: np.ndarray, value: float | None = None) -> np.ndarray:\n if value is None:\n val = array.max()\n else:\n val = value\n array = array / val\n return array",
"def npmaxabs(arr: np.ndarray) -> float:\n return np.max(np.abs(arr))",
"def scale_dataset(ds):\n for i in range(0,ds.dims):\n fmax = ds.data[0][i]\n for j in range(1,len(ds)):\n curr = ds.data[j][i]\n if curr > fmax:\n fmax = curr \n if fmax > 0:\n for j in range(0,len(ds)):\n ds.data[j][i] /= fmax",
"def normalize(arr):\n arr = arr.astype('float')\n # Do not touch the alpha channel\n for i in range(1):\n minval = arr[...,i].min()\n maxval = arr[...,i].max()\n if minval != maxval:\n arr[...,i] -= minval\n arr[...,i] *= (255.0/(maxval-minval))\n return arr",
"def normalise_0_1(arraylike):\n array_min = np.min(arraylike)\n array_max = np.max(arraylike)\n normalised = (arraylike - array_min) / (array_max - array_min)\n # convert to float\n normalised = np.array(normalised).astype(float)\n return normalised, array_min, array_max",
"def test_scale_features_min_max_norm(self):\n data = array([[0.564, 20.661], [-18.512, 41.168], [-0.009, 20.440]])\n cdata = CData(data)\n\n # correct answer computed with Mathematica\n # TODO: can we compute the right answer in Python?\n answer = array([[1, 0.0106619], [0, 1], [0.969962, 0]])\n\n # perform min-max norm scaling on features and check answer\n cdata.scale_features('min-max norm')\n self.assertTrue(allclose(cdata.data, answer))",
"def standardise(self):\n if self.vector.shape is ():\n return\n if self.dimensionality() != 1:\n # TODO: implement\n raise NotImplementedError\n max_value = 1.0 * max(self.vector)\n if max_value == 0.0:\n # Nothing to do\n return\n self.vector = self.vector.astype('float64') / max_value"
] |
[
"0.7536234",
"0.71070904",
"0.70744723",
"0.6854876",
"0.6705524",
"0.66903067",
"0.6577902",
"0.6577724",
"0.65433544",
"0.6540987",
"0.63960224",
"0.63942844",
"0.638665",
"0.63782936",
"0.6344834",
"0.6317285",
"0.62940407",
"0.62483877",
"0.6247812",
"0.6247812",
"0.6189317",
"0.61798406",
"0.61779034",
"0.61678255",
"0.61672604",
"0.6161706",
"0.6159368",
"0.61536014",
"0.6150235",
"0.61247945"
] |
0.8425474
|
0
|
Return a diagonal mask computed from an array. Useful when the data is the same if you transpose the array, e.g. in a heatmap.
|
def get_diagonal_mask(data):
mask = np.zeros_like(data, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
return mask
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_diagonal(self, array):\n diags = [array[::-1, :].diagonal(i) for i in range(-array.shape[0] + 1, array.shape[1])]\n\n # Now back to the original array to get the upper-left-to-lower-right diagonals,\n # starting from the right, so the range needed for shape (x,y) was y-1 to -x+1 descending.\n diags.extend(array.diagonal(i) for i in range(array.shape[1] - 1, -array.shape[0], -1))\n return diags",
"def diagonal(a, offset=0, axis1=0, axis2=1):\n # TODO(okuta): check type\n return a.diagonal(offset, axis1, axis2)",
"def makeMaskFromArray(array):\n if array is None: return None\n cls = globals()[\"Mask%s\" % suffixes[str(array.dtype.type)]]\n return cls(array)",
"def dilate(array):\n # kernel = [[1] * 7] * 7 # blocky 3-pixel dilation\n y, x = np.ogrid[-3:4, -3:4]\n kernel = ((x * x) + (y * y) <= 3.5**2) # disk-like 3-pixel radial dilation\n return scipy.ndimage.binary_dilation(array, structure=kernel)",
"def diagonal(nd):\n assert nd.ndim == 2, \"diagonal requires 2 dimensional ndarray\"\n shape_min = hl.min(nd.shape[0], nd.shape[1])\n return hl.nd.array(hl.range(hl.int32(shape_min)).map(lambda i: nd[i, i]))",
"def solution(array):\n rows = array.shape[0]\n cols = array.shape[1]\n result = np.ones((rows,cols))\n result[1:rows-1,1:cols-1] = 0\n return result",
"def flatten_array(array, mask=None):\n if isinstance(array, (list, tuple)):\n if mask is None:\n return array\n array = np.asarray(array)\n if isinstance(array, np.ndarray):\n if mask is not None:\n if not isinstance(array, np.ndarray):\n raise Exception(f\"Mask type {repr(type(mask))} should be the same as array type {repr(type(array))}\")\n return array[mask]\n else:\n return array.reshape(-1)\n elif torch.is_tensor(array):\n if mask is not None:\n if not torch.is_tensor(mask):\n raise Exception(f\"Mask type {repr(type(mask))} should be the same as array type {repr(type(array))}\")\n return array[mask]\n else:\n return array.reshape(-1)\n else:\n raise Exception(f\"Unrecognized array type {repr(type(array))} during array flattening (mask type is {repr(type(mask))}')\")",
"def diag(cls, elements, domain):\n return DDM.diag(elements, domain).to_dfm()",
"def diag_indices_from(arr):\r\n if not arr.ndim >= 2:\r\n raise ValueError(\"input array must be at least 2-d\")\r\n # For more than d=2, the strided formula is only valid for arrays with\r\n # all dimensions equal, so we check first.\r\n if not np.alltrue(np.diff(arr.shape) == 0):\r\n raise ValueError(\"All dimensions of input must be of equal length\")\r\n\r\n return diag_indices(arr.shape[0], arr.ndim)",
"def create_diagonal(m: NumpyRealArray) -> NumpyRealArray:\n indices = (..., *np.diag_indices(m.shape[-1]))\n retval = np.zeros((*m.shape, m.shape[-1]), dtype=m.dtype)\n retval[indices] = m\n return retval",
"def writeLaserMask(self, array):\n offset = self.activeOffset\n shape = self.activeShape\n stride = self.activeStride\n \n target = pg.subArray(array, offset, shape, stride)\n target[:] = 1",
"def _maskedCollapse(array_in, method): \n import numpy.ma as ma\n \n # Perform an numpy.ma array collapse along the z-axis\n if method == 'sum':\n print('(3d_collapse): Masked sum collapse of extracted slices ...')\n collapsed_array = ma.sum(array_in, axis=0)\n \n elif method == 'mean':\n print('(3d_collapse): Masked mean of extracted slices:')\n collapsed_array = ma.mean(array_in, axis=0)\n \n elif method == 'median':\n print('(3d_collapse): Masked median of extracted slices:')\n collapsed_array = ma.extras.median(array_in, axis=0)\n \n # Returns an array of type numpy.array \n return collapsed_array.data",
"def mask(mask_key, data):\r\n _m = array.array(\"B\", mask_key)\r\n _d = array.array(\"B\", data)\r\n for i in xrange(len(_d)):\r\n _d[i] ^= _m[i % 4]\r\n return _d.tostring()",
"def make_mask(data, pad):\n def subsequent_mask(size):\n \"\"\" helper function for creating the masks. \"\"\"\n attn_shape = (1, size, size)\n subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')\n return torch.from_numpy(subsequent_mask) == 0\n\n mask = (data != pad).unsqueeze(-2)\n mask = mask & Variable(\n subsequent_mask(data.size(-1)).type_as(mask.data))\n return mask",
"def make_mask(data, pad):\n\n def subsequent_mask(size):\n \"\"\" helper function for creating the masks. \"\"\"\n attn_shape = (1, size, size)\n subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')\n return torch.from_numpy(subsequent_mask) == 0\n\n mask = (data != pad).unsqueeze(-2)\n mask = mask & Variable(\n subsequent_mask(data.size(-1)).type_as(mask.data))\n return mask",
"def zero_diag(mat):\n\n return replace_diag(mat, np.zeros(mat.shape[0]))",
"def apply_diagonal_inplace(self, array: 'Nparray') -> None:\n beta_ptr = 0\n\n if array.size == 2 * self.norb():\n beta_ptr = self.norb()\n\n elif array.size != self.norb():\n raise ValueError('Non-diagonal array passed'\n ' into apply_diagonal_inplace')\n\n if not array.flags['C_CONTIGUOUS']:\n array = numpy.copy(array)\n\n if fqe.settings.use_accelerated_code:\n aarray = array[:self.norb()]\n barray = array[beta_ptr:]\n _apply_diagonal_inplace(self.coeff, aarray, barray,\n self._core.string_alpha_all(),\n self._core.string_beta_all())\n else:\n alpha = numpy.zeros((self._core.lena(),), dtype=numpy.complex128)\n beta = numpy.zeros((self._core.lenb(),), dtype=numpy.complex128)\n\n for alp_cnf in range(self._core.lena()):\n occupation = self._core.string_alpha(alp_cnf)\n diag_ele = 0.0\n for ind in integer_index(occupation):\n diag_ele += array[ind]\n alpha[alp_cnf] = diag_ele\n for bet_cnf in range(self._core.lenb()):\n occupation = self._core.string_beta(bet_cnf)\n diag_ele = 0.0\n for ind in integer_index(occupation):\n diag_ele += array[beta_ptr + ind]\n beta[bet_cnf] = diag_ele\n\n for alp_cnf in range(self._core.lena()):\n for bet_cnf in range(self._core.lenb()):\n self.coeff[alp_cnf,\n bet_cnf] *= alpha[alp_cnf] + beta[bet_cnf]",
"def subsequent_mask(mask_size):\n mask_shape = (1, mask_size, mask_size)\n # Create a lower-triangle matrix at the primary diagonal (0th)\n # such that all the elements above the diagonal are 0.\n mask = np.tril(np.ones(mask_shape), k=0).astype('uint8')\n mask = torch.from_numpy(mask)\n return mask",
"def apply_mask(self, array):\n # assert that the array and Mask.data are of the same size\n assert array.shape == self.shape, \"array and mask should be of the same shape\"\n\n array_copy = array.copy()\n\n # Applying mask\n # apply func_true where Mask.data is True\n array_copy[self.data] = map(self.func_true, array_copy[self.data])\n\n # apply func_false where Mask.data is False\n array_copy[np.invert(self.data)] = map(self.func_false, array_copy[np.invert(self.data)])\n\n return array_copy",
"def diag(self):\n in_diag = (self.rows == self.cols)\n diag = np.zeros(min(self.n, self.n), dtype=np.float64) # default 0.\n diag[self.rows[in_diag]] = self.vals[in_diag]\n return diag",
"def build_mask(dqarr, bitvalue):\n bitvalue = interpret_bit_flags(bitvalue, mnemonic_map=pixel)\n\n if bitvalue is None:\n return (np.ones(dqarr.shape, dtype=np.uint8))\n return np.logical_not(np.bitwise_and(dqarr, ~bitvalue)).astype(np.uint8)",
"def diagonal(self):\n M = self.rep\n m, n = self.shape\n return [M[i, i] for i in range(min(m, n))]",
"def padded_mask(array, first_pad, second_pad):\n # Split the array into two boolean arrays\n # Make a NaN mask, 1 where NaN\n nan_mask = np.isnan(array)\n # Make a mask where array 1s are 1 and everything else (0 and NaN) is 0\n first_round_mask = (array == 1)\n # Dilate <first_pad> times\n for i in range(first_pad):\n first_round_mask = dilate(first_round_mask, nan_mask)\n # Save this state of the mask\n second_round_mask = np.copy(first_round_mask)\n # Dilate <second_pad> times\n for i in range(second_pad):\n second_round_mask = dilate(second_round_mask, nan_mask)\n # Compare the first and second round dilations\n result = (second_round_mask & ~first_round_mask).astype(float)\n # Conserve NaNs\n result[nan_mask] = np.nan\n return result",
"def row_col_diag(arr):\n three_sets = np.zeros((8,3), dtype=int)\n for i in range(arr.shape[0]):\n three_sets[i] = arr[i]\n for i in range(arr.shape[1]):\n three_sets[i+3] = arr[:,i]\n three_sets[6] = np.diag(arr)\n three_sets[7] = np.diag(np.flipud(arr))\n return three_sets",
"def fill_diagonal(a, val):\r\n return fill_diagonal_(a, val)",
"def diagonal(matrix):\n if sp.sparse.issparse(matrix):\n diag = np.array(matrix.diagonal())\n else:\n diag = np.diagonal(matrix).copy()\n return diag",
"def evolve_diagonal(self, array: 'Nparray',\n inplace: bool = False) -> 'Nparray':\n beta_ptr = 0\n\n if array.size == 2 * self.norb():\n beta_ptr = self.norb()\n\n elif array.size != self.norb():\n raise ValueError('Non-diagonal array passed into evolve_diagonal')\n\n if inplace:\n data = self.coeff\n else:\n data = numpy.copy(self.coeff).astype(numpy.complex128)\n\n if not array.flags['C_CONTIGUOUS']:\n array = numpy.copy(array)\n\n if fqe.settings.use_accelerated_code:\n aarray = array[:self.norb()]\n barray = array[beta_ptr:]\n _evolve_diagonal_inplace(data, aarray, barray,\n self._core.string_alpha_all(),\n self._core.string_beta_all())\n else:\n for alp_cnf in range(self._core.lena()):\n occupation = self._core.string_alpha(alp_cnf)\n diag_ele = 0.0\n for ind in integer_index(self._core.string_alpha(alp_cnf)):\n diag_ele += array[ind]\n\n if diag_ele != 0.0:\n data[alp_cnf, :] *= numpy.exp(diag_ele)\n\n for bet_cnf in range(self._core.lenb()):\n occupation = self._core.string_beta(bet_cnf)\n diag_ele = 0.0\n for ind in integer_index(occupation):\n diag_ele += array[beta_ptr + ind]\n\n if diag_ele:\n data[:, bet_cnf] *= numpy.exp(diag_ele)\n\n return data",
"def writeScanMask(self, array):\n offset = self.scanOffset\n shape = self.scanShape\n stride = self.scanStride\n \n target = pg.subArray(array, offset, shape, stride)\n target[:] = 1",
"def DiagonalGate():\n\n def f(x): # pylint: disable=invalid-name\n # x : [batch, 1, length, depth]\n x = jnp.pad(x, [(0, 0), (0, 0), (1, 1), (0, 0)],\n mode='constant', constant_values=0.0)\n depth = x.shape[-1] // 3\n assert 3 * depth == x.shape[-1], ('Depth must be divisible by 3', depth,\n x.shape)\n xs = [\n x[:, :, :-2, :depth], x[:, :, 1:-1, depth:2 * depth],\n x[:, :, 2:, 2 * depth:3 * depth]\n ]\n return jnp.concatenate(xs, axis=3)\n return tl.Fn('DiagonalGate', f)",
"def fold_diag(pixels):\n copy = blank_image(len(pixels), len(pixels[0])) \n for r in range(len(pixels)):\n for c in range(len(pixels[0])):\n copy[r][c] = pixels[r][c]\n for r in range(len(pixels)):\n for c in range(r):\n copy[r][c] = [255, 255, 255]\n return copy"
] |
[
"0.72953194",
"0.61421484",
"0.6136961",
"0.58667374",
"0.5818971",
"0.558432",
"0.5531007",
"0.55216306",
"0.5488027",
"0.5458054",
"0.5457682",
"0.5436873",
"0.54321617",
"0.5413237",
"0.54027534",
"0.5357985",
"0.5352605",
"0.53214806",
"0.53060347",
"0.5296002",
"0.5281449",
"0.52566785",
"0.52535075",
"0.52481425",
"0.5234038",
"0.5229512",
"0.5224252",
"0.52169454",
"0.52145994",
"0.52086914"
] |
0.706273
|
1
|
|coro| Refetches the inventory.
|
async def update(self) -> None:
data = await self._state.http.get_user_inventory(self.owner.id64, self.game.app_id, self.game.context_id)
self._update(data)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def load_inventory(self):\n for item in self.items:\n self.rooms[int(item.initial_room_id) - 1].inventory.add(item)",
"def getitem(self):\n self.inventory += 1",
"def inventory(self, time: int) -> Inventory:\n self.refreshDroneStatus(time)\n return self.__inventory",
"async def get_inventory(request: web.Request, ) -> web.Response:\n return web.Response(status=200)",
"def openinv(cls): #THIS DOESN'T NEED TO BE MODIFIED!\n\n while True:\n inventory_items = {thing.id: thing.name for thing in cls.inventory}\n inventory_items[\"exit\"] = \"Exit Inventory\"\n inventory_items[\"newln\"] = \"\"\n inventory_items[\"playername\"] = str(gray('\"{}\"'.format(cls.name)))\n inventory_items[\"lv\"] = str(gray(\"LV: {}\".format(cls.lv)))\n inventory_items[\"hp\"] = str(gray(\"HP: {}/{}\".format(cls.hp, cls.max_hp)))\n inventory_items[\"exp\"] = str(gray(\"EXP: {}/40\".format(cls.exp)))\n\n choice = Menu.menu(\n title = \"Inventory\",\n contents = inventory_items \n )\n if choice == \"exit\":\n Terminal.clear_all()\n return\n while True:\n displayed_item = next((thing for thing in cls.inventory if thing.id == choice), None)\n final_choice = Menu.menu(\n title = displayed_item.name,\n contents = {\n \"interact\":displayed_item.interact_label,\n \"inspect\":\"Inspect\",\n \"drop\":\"Drop\",\n \"back\":\"Back\"\n }\n )\n if final_choice == \"back\":\n break\n if final_choice == \"interact\":\n use = displayed_item.interact()\n Terminal.clear_all()\n print(use[\"message\"])\n if \"heal_\" in use[\"action\"]:\n cls.hp += int(use[\"action\"].replace(\"heal_\", ''))\n if cls.hp > cls.max_hp:\n cls.hp = cls.max_hp\n cls.inventory.remove(displayed_item)\n Game.standard_wait()\n break\n if final_choice == \"inspect\":\n Terminal.clear_all()\n print(displayed_item)\n Game.standard_wait()\n continue\n if final_choice == \"drop\":\n Terminal.clear_all()\n print(\"You dropped the {}\".format(displayed_item.name))\n cls.inventory.remove(displayed_item)\n Game.standard_wait()\n break",
"def get_inventory():\n return INVENTORY",
"def get_inventory(self, context):\n with LoggingSessionContext(context) as logger, LogCommand(\n logger, \"get_inventory\"\n ):\n api = CloudShellSessionContext(context).get_api()\n\n resource_config = FirewallResourceConfig.from_context(\n self.SHELL_NAME, context, api, self.SUPPORTED_OS\n )\n\n cli_configurator = CheckpointCliConfigurator(\n self._cli, resource_config, logger\n )\n enable_disable_snmp_flow = CheckpointEnableDisableSnmpFlow(\n cli_configurator, logger\n )\n snmp_configurator = EnableDisableSnmpConfigurator(\n enable_disable_snmp_flow, resource_config, logger\n )\n\n resource_model = FirewallResourceModel.from_resource_config(resource_config)\n\n autoload_operations = CheckpointSnmpAutoloadFlow(logger, snmp_configurator)\n logger.info(\"Autoload started\")\n response = autoload_operations.discover(self.SUPPORTED_OS, resource_model)\n logger.info(\"Autoload completed\")\n return response",
"async def inv(self, ctx):\r\n author = ctx.author\r\n with DB() as db:\r\n company = await self.get_active_company(ctx, db, author)\r\n stock = self.iex.get_held_stocks(db, company.id)\r\n inventory = []\r\n for s in stock:\r\n close = await self.get_latest_close(ctx, db, s.symbol)\r\n inventory.append([s.symbol, s.quantity, close.close * s.quantity])\r\n inv_df = pd.DataFrame(inventory, columns=['Symbol', 'Quantity', 'Value'])\r\n aggregated = tabulate(inv_df.groupby(['Symbol']).sum().reset_index(), headers=['Symbol', 'Quantity', 'Value'])\r\n await ctx.send(f'```{aggregated}```')",
"def inventory(self):\n data = self.client.inventory(self.creds, self.transaction, self.environment)\n return list(data) if isinstance(data, set) else data",
"def inventory(self):\n return self._inventory",
"async def list_inventory_endpoint(request):\n hotel_id = request.args[\"hotel_id\"][0]\n start_date = request.args[\"start_date\"][0]\n end_date = request.args[\"end_date\"][0]\n inventory = model.list_inventory(hotel_id, start_date, end_date)\n if inventory == model.OPERATION_ERROR_RETURN_CODE:\n return json({\"success\": False})\n return json({\"success\": True, \"inventory\": inventory})",
"async def stocks(self, ctx):\n\t\tpass",
"def get_inventory(self, context):\n # See below some example code demonstrating how to return the resource structure and attributes\n # In real life, this code will be preceded by SNMP/other calls to the resource details and will not be static\n # run 'shellfoundry generate' in order to create classes that represent your data model\n\n '''\n resource = LanforgeResource.create_from_context(context)\n resource.vendor = 'specify the shell vendor'\n resource.model = 'specify the shell model'\n\n port1 = ResourcePort('Port 1')\n port1.ipv4_address = '192.168.10.7'\n resource.add_sub_resource('1', port1)\n\n return resource.create_autoload_details()\n '''\n return AutoLoadDetails([], [])",
"def get_with_inventory(self, context, id_):\n try:\n db_resource_data = self.db_api.get_resource(context, id_)\n res_properties = self.db_api.get_properties(context, id_)\n\n # for non resource managers return get\n if (db_resource_data['type'] !=\n eon_const.EON_RESOURCE_TYPE_ESX_CLUSTER):\n return _make_response(db_resource_data)\n\n res_mgr_obj = (\n self.db_api.get_resource_managers_by_resource_id(context,\n id_))\n driver_obj = driver.load_resource_driver(db_resource_data['type'])\n _inventory = driver_obj.get_res_inventory(res_mgr_obj,\n res_properties)\n _resource_data = _make_response(db_resource_data,\n inventory=_inventory)\n # (NOTE) Here setting the details of resource manager for the\n # resource\n _res_mgr_data = _make_response(res_mgr_obj, meta_data=False)\n _resource_data[eon_const.RSRC_MGR_INFO] = _res_mgr_data\n\n except exception.NotFound as e:\n LOG.exception(e)\n raise e\n except Exception as e:\n msg = _(\"Error retrieving the 'eon_resource':%s. Reason: %s\") % (\n id_, e)\n log_msg = (\"Error retrieving the 'eon_resource':%s.\"\n \" Reason: %s\") % (id_, e)\n LOG.exception(log_msg)\n raise exception.RetrieveException(msg)\n\n LOG.info(\"The Resource data %s \"\n % logging.mask_password(_resource_data))\n return _resource_data",
"def get_inventory(self, node):",
"def fetch(self, vault_client):\n result = self.read(vault_client)\n if result:\n if isinstance(result, dict) and 'data' in result:\n self.existing = result['data']\n else:\n self.existing = result\n else:\n self.existing = None",
"def get_with_inventory(self, context, id_):\n try:\n db_resource_mgr_data = self.db_api.get_resource_manager(\n context, id_)\n db_props_data = self.db_api.get_resource_mgr_properties(context,\n id_, key=eon_const.RESOURCE_MGR_STATE_KEY)\n\n driver_obj = driver.load_resource_mgr_driver(\n db_resource_mgr_data['type'])\n inventory = driver_obj.get_inventory(db_resource_mgr_data)\n resource_mgr_data = _make_response(db_resource_mgr_data,\n property_list=db_props_data,\n inventory=inventory)\n LOG.debug(\"[%s] Resource data %s\"\n % (id_, logging.mask_password(resource_mgr_data)))\n return resource_mgr_data\n\n except exception.NotFound as e:\n LOG.error(e)\n raise e\n except Exception as e:\n msg = \"Error retrieving the 'resource':%s. Reason: %s\" % (\n id_, e.message)\n LOG.exception(msg)\n raise exception.RetrieveException(e.message)",
"def get_inventory_from_cache(self):\n cache = open(self.cache_path_cache, 'r')\n json_inventory = cache.read()\n return json_inventory",
"def get_inventory(self, context):\n # See below some example code demonstrating how to return the resource structure\n # and attributes. In real life, of course, if the actual values are not static,\n # this code would be preceded by some SNMP/other calls to get the actual resource information\n '''\n # Add sub resources details\n sub_resources = [ AutoLoadResource(model ='Generic Chassis',name= 'Chassis 1', relative_address='1'),\n AutoLoadResource(model='Generic Module',name= 'Module 1',relative_address= '1/1'),\n AutoLoadResource(model='Generic Port',name= 'Port 1', relative_address='1/1/1'),\n AutoLoadResource(model='Generic Port', name='Port 2', relative_address='1/1/2'),\n AutoLoadResource(model='Generic Power Port', name='Power Port', relative_address='1/PP1')]\n\n\n attributes = [ AutoLoadAttribute(relative_address='', attribute_name='Location', attribute_value='Santa Clara Lab'),\n AutoLoadAttribute('', 'Model', 'Catalyst 3850'),\n AutoLoadAttribute('', 'Vendor', 'Cisco'),\n AutoLoadAttribute('1', 'Serial Number', 'JAE053002JD'),\n AutoLoadAttribute('1', 'Model', 'WS-X4232-GB-RJ'),\n AutoLoadAttribute('1/1', 'Model', 'WS-X4233-GB-EJ'),\n AutoLoadAttribute('1/1', 'Serial Number', 'RVE056702UD'),\n AutoLoadAttribute('1/1/1', 'MAC Address', 'fe80::e10c:f055:f7f1:bb7t16'),\n AutoLoadAttribute('1/1/1', 'IPv4 Address', '192.168.10.7'),\n AutoLoadAttribute('1/1/2', 'MAC Address', 'te67::e40c:g755:f55y:gh7w36'),\n AutoLoadAttribute('1/1/2', 'IPv4 Address', '192.168.10.9'),\n AutoLoadAttribute('1/PP1', 'Model', 'WS-X4232-GB-RJ'),\n AutoLoadAttribute('1/PP1', 'Port Description', 'Power'),\n AutoLoadAttribute('1/PP1', 'Serial Number', 'RVE056702UD')]\n\n return AutoLoadDetails(sub_resources,attributes)\n '''\n\n self._log(context, 'Begin autoload')\n resources = []\n attributes = []\n\n\n attributes.append(AutoLoadAttribute('', 'replication_address', self.get_replication_address(context)))\n attributes.append(AutoLoadAttribute('', 'connection_key', self.get_connection_key(context)))\n\n networks = self._get_newtork_interfaces(context)\n self._log(context, 'got networks')\n\n controllers = self._get_controllers(context)\n self._log(context, 'got controllers')\n ports = self._get_ports(context)\n\n model = None\n for controller in controllers:\n self._log(context, 'Processing ctrlt: ' + controller['name'] + ':' + controller['model'])\n resources.append(AutoLoadResource(model='Generic Storage Controller', name=controller['name'],\n relative_address=controller['name']))\n if model is None:\n model = controller['model']\n\n attributes.append(AutoLoadAttribute('', 'Model', model))\n\n for network in networks:\n self._log(context, 'Processing netwk: ' + network['name'] + ':' + str(network['address']))\n net_name = network['name']\n controller = net_name.split('.')[0]\n if 'vir0' in controller or 'vir1' in controller:\n attributes.append(AutoLoadAttribute('',str(controller + '_address'), str(network['address'])))\n continue\n if 'vir' in controller:\n continue\n if 'management' not in network['services']:\n continue\n resources.append(AutoLoadResource(model='Storage Network Port', name=net_name,\n relative_address=controller.upper() + '/' + str(network['address'])))\n\n for port in ports:\n if port['iqn'] is not None:\n port_name = port['name']\n controller = port_name.split('.')[0]\n resources.append(AutoLoadResource(model='iSCSI Storage Port', name=port['name'],\n relative_address=controller + '/' + port['portal']))\n attributes.append(AutoLoadAttribute(controller + '/' + port['portal'], 'iqn', 
port['iqn']))\n elif port['wwn'] is not None:\n port_name = port['name']\n controller = port_name.split('.')[0]\n resources.append(AutoLoadResource(model='FC Storage Port', name=port['name'],\n relative_address=controller + '/' + port['name'].split('.')[1]))\n attributes.append(AutoLoadAttribute(controller + '/' + port['name'].split('.')[1], 'wwn', port['wwn']))\n\n return AutoLoadDetails(resources, attributes)",
"def get_inventory(self):\n raise NotImplementedError(\"Subclasses define what returning the inventory entails\")",
"def refresh_inventory(self):\n if self.skill_tree_displaying:\n return\n self.inventory_tiles, _ = player_panel_renderer.draw_inventory(self.player_dict['inventory'], refresh=True)",
"async def get(self):\n identifier = self.data[\"id\"]\n item = self.core.item_manager.items.get(identifier)\n if not item:\n return self.error(\n ERROR_ITEM_NOT_FOUND,\n f\"No item found with identifier {identifier}\", status_code=404)\n\n return self.json(data=list(item.actions.keys()))",
"def test_get_dealer_active_inventory(self):\n pass",
"def get_inventory(self, resources):\n uri = '/api/services/inventory'\n body = {'resources': resources}\n result = self.session.post(uri, body=body)\n return result",
"def display_inventory(self):\n header = \"Carrying:\\n\"\n nothing_func = lambda *args: None\n action_list = [(item, nothing_func) for item in self.inventory]\n if len(action_list) == 0:\n header += \"Nothing at all\"\n events.trigger_event(\"print_list\", action_list, header=header)",
"def populate_initial_inventory(self):\r\n\r\n weapons_file = open('initial-inventory.json', \"r\")\r\n json_data = json.loads(weapons_file.read())\r\n weapons_file.close()\r\n\r\n weapons = json_data['weapons']\r\n for weapon in weapons:\r\n requests.post(\"http://\" + self.ip_address + \":3000/Weapons\", data=weapon)",
"def read_inventory_file():\n try:\n with open('inventory', 'r') as file:\n inventory = file.read()\n return inventory\n except OSError:\n pass",
"def inventory(env):\n envs = environments()\n check_env(env, envs)\n\n headers = [] # a list of fact descriptions to go\n # in the table header\n fact_names = [] # a list of inventory fact names\n fact_data = {} # a multidimensional dict for node and\n # fact data\n\n # load the list of items/facts we want in our inventory\n try:\n inv_facts = app.config['INVENTORY_FACTS']\n except KeyError:\n inv_facts = [('Hostname', 'fqdn'),\n ('IP Address', 'ipaddress'),\n ('OS', 'lsbdistdescription'),\n ('Architecture', 'hardwaremodel'),\n ('Kernel Version', 'kernelrelease')]\n\n # generate a list of descriptions and a list of fact names\n # from the list of tuples inv_facts.\n for desc, name in inv_facts:\n headers.append(desc)\n fact_names.append(name)\n\n query = AndOperator()\n fact_query = OrOperator()\n fact_query.add([EqualsOperator(\"name\", name) for name in fact_names])\n\n if env != '*':\n query.add(EqualsOperator(\"environment\", env))\n\n query.add(fact_query)\n\n # get all the facts from PuppetDB\n facts = puppetdb.facts(query=query)\n\n for fact in facts:\n if fact.node not in fact_data:\n fact_data[fact.node] = {}\n\n fact_data[fact.node][fact.name] = fact.value\n\n return Response(stream_with_context(\n stream_template(\n 'inventory.html',\n headers=headers,\n fact_names=fact_names,\n fact_data=fact_data,\n envs=envs,\n current_env=env\n )))",
"def inventory(env):\n envs = environments()\n check_env(env, envs)\n headers, fact_names = inventory_facts()\n\n return render_template(\n 'inventory.html',\n envs=envs,\n current_env=env,\n fact_headers=headers)",
"def collect(item):\n inventory.append(item)\n print(f'You now have: {inventory}')"
] |
[
"0.6672313",
"0.6481613",
"0.64376086",
"0.6390931",
"0.6384816",
"0.63713384",
"0.6365049",
"0.60815936",
"0.6057353",
"0.6050127",
"0.60095286",
"0.59535676",
"0.5945936",
"0.5925097",
"0.5903476",
"0.5844133",
"0.5825453",
"0.58238333",
"0.580484",
"0.57929665",
"0.5770575",
"0.5766562",
"0.5720572",
"0.5709943",
"0.56841534",
"0.5672511",
"0.5639652",
"0.5637677",
"0.56189495",
"0.5607275"
] |
0.65848404
|
1
|
Retrieve all instances of OSLicence
|
def find_all():
return ItopapiPrototype.find_all(ItopapiOSLicence)
|
{
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
[
"def get_soma_objects(self):\n\n msg_store = MessageStoreProxy(database=\"soma2data\", collection=\"soma2\")\n objs = msg_store.query(SOMA2Object._type, message_query={\"map_name\":self.soma_map,\"config\":self.soma_conf})\n print \"queried soma2 objects >> \", objs\n self.soma_objects = ce.get_soma_objects()\n print \"hard coded objects >> \", [self.soma_objects[r].keys() for r in self.soma_objects.keys()]",
"def get_socios(self):\n return self.__socios",
"def list_silos(self, kwargs):\n verbose = kwargs.get(\"verbose\", False)\n attributes = ALL if verbose else [\"cn\", \"objectClass\"]\n\n self.display(\n self.engine.query(\n self.engine.SILOS_FILTER(),\n attributes, base=','.join([\"CN=AuthN Policy Configuration,CN=Services,CN=Configuration\", self.engine.base_dn])\n ),\n verbose\n )",
"def get_scnlist_all(self):\n logger.debug(\"Creating Database Engine and Session.\")\n db_engine = sqlalchemy.create_engine(self.db_info_obj.dbConn)\n session_sqlalc = sqlalchemy.orm.sessionmaker(bind=db_engine)\n ses = session_sqlalc()\n logger.debug(\"Perform query to find scenes which need downloading.\")\n query_result = ses.query(EDDSentinel1ASF).order_by(EDDSentinel1ASF.Acquisition_Date.asc()).all()\n scns = list()\n if query_result is not None:\n for record in query_result:\n scns.append(record.PID)\n ses.close()\n logger.debug(\"Closed the database session.\")\n return scns",
"def getobjsense(self): # 3\n res,resargs = self.__obj.getobjsense()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _sense_return_value = resargs\n _sense_return_value = objsense(_sense_return_value)\n return _sense_return_value",
"def listObjects(instance):\n # Get a cursor from the DB connection.\n cursor = Conection.connect(DB_USER, DB_PASSWD, instance, DB_HOST)\n \n # Compose the SQL query to find all the orbits/SSM objects. We do this with \n # a simle query to the derivedobjects table since we realy only need the\n # ssm_id values.\n maxMJD = completedPrecoveryMaxDate(instance)\n if(maxMJD == None):\n return([], None)\n \n sql = 'select distinct(ssm_id) from derivedobjects where ssm_id is not null'\n sql += ' and status = \"I\"'\n # sql += ' and updated >= \"%s\"' %(minModifiedDate)\n # <-- end if\n \n nRes = cursor.execute(sql)\n return([x[0] for x in cursor.fetchall()], float(maxMJD))",
"def list(self) -> List[Organisation]:\n ...",
"def GetObjects(self): \r\n return self.model.GetObjects()",
"def find_all():\n return ItopapiPrototype.find_all(ItopapiIncident)",
"def objects(self):",
"def iter_all(self):\n return self.opportunities.find()",
"def ls():\n return dynamodb.ls(OrganizationModel)",
"def __init__(self):\n self.incidents_models = {}\n self.risks = []\n self.incidents_models = None",
"def get_all_elections(self) -> list:",
"def find_all(self):\n pass",
"def get_common_food(cls):\n objs = cls.objects\n return objs",
"def list_instances(self):\n # list instances\n self._list_instances()",
"def get_all(self, name):\n\t\tpass",
"def get_instances(cls):\n raise NotImplementedError",
"def get_soma_rois(self):\n soma_map = \"collect_data_map_cleaned\"\n # soma_config = \"test\"\n # query = {\"map\":soma_map, \"config\":soma_config}\n all_rois = []\n ret = self.soma_roi_store.query(SOMA2ROIObject._type)\n for (roi, meta) in ret:\n if roi.map_name != soma_map: continue\n if roi.geotype != \"Polygon\": continue\n all_rois.append(roi)\n return all_rois",
"def list_circles(request):\n circles = Circle.objects.filter(is_public=True)\n serializers = CircleSerializer(circles, many=True)\n return Response(serializers.data)",
"def all(self):\n return self.__objects",
"def all(self):\n return self.__objects",
"def all(self):\n return self.__objects",
"def all(self):\n return self.__objects",
"def all(self):\n return self.__objects",
"def all(self):\n return self.__objects",
"def get_scnlist_con2ard(self):\n logger.debug(\"Creating Database Engine and Session.\")\n db_engine = sqlalchemy.create_engine(self.db_info_obj.dbConn)\n session_sqlalc = sqlalchemy.orm.sessionmaker(bind=db_engine)\n ses = session_sqlalc()\n\n logger.debug(\"Perform query to find scenes which need downloading.\")\n query_result = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.Downloaded == True,\n EDDSentinel1ASF.ARDProduct == False,\n EDDSentinel1ASF.Invalid == False).order_by(\n EDDSentinel1ASF.Acquisition_Date.asc()).all()\n\n scns2ard = list()\n if query_result is not None:\n for record in query_result:\n scns2ard.append(record.PID)\n ses.close()\n logger.debug(\"Closed the database session.\")\n return scns2ard",
"def get_all_locations(self):",
"def find_objs(self, cls, **attr):\n nodes = getattr(self.graph, getattr(models, cls).element_plural).query(**attr).all()\n return nodes"
] |
[
"0.6234363",
"0.5961881",
"0.595799",
"0.59259844",
"0.5838429",
"0.57797927",
"0.5702275",
"0.57011425",
"0.5666823",
"0.5649265",
"0.5632733",
"0.56231284",
"0.5603241",
"0.54869276",
"0.5477077",
"0.5447893",
"0.54415894",
"0.5405049",
"0.540362",
"0.5400546",
"0.5390554",
"0.53836954",
"0.53836954",
"0.53836954",
"0.53836954",
"0.53836954",
"0.53836954",
"0.5383241",
"0.5381411",
"0.5374819"
] |
0.60590714
|
1
|